# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import vm_states
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
LOG = logging.getLogger(__name__)
ALIAS = "os-admin-actions"
# States usable in the reset_state action
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR)
def authorize(context, action_name):
action = 'v3:%s:%s' % (ALIAS, action_name)
extensions.extension_authorizer('compute', action)(context)
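# For example (illustrative), authorize(context, 'pause') enforces the policy
# rule "v3:os-admin-actions:pause" against the caller's context.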
class AdminActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(AdminActionsController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@extensions.expected_errors((404, 409))
@wsgi.action('pause')
def _pause(self, req, id, body):
"""Permit Admins to pause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
try:
server = self.compute_api.get(ctxt, id, want_objects=True)
self.compute_api.pause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'pause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('unpause')
def _unpause(self, req, id, body):
"""Permit Admins to unpause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
try:
server = self.compute_api.get(ctxt, id, want_objects=True)
self.compute_api.unpause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unpause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('suspend')
def _suspend(self, req, id, body):
"""Permit admins to suspend the server."""
context = req.environ['nova.context']
authorize(context, 'suspend')
try:
server = self.compute_api.get(context, id, want_objects=True)
self.compute_api.suspend(context, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'suspend')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('resume')
def _resume(self, req, id, body):
"""Permit admins to resume the server from suspend."""
context = req.environ['nova.context']
authorize(context, 'resume')
try:
server = self.compute_api.get(context, id, want_objects=True)
self.compute_api.resume(context, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resume')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((400, 404, 409, 413))
@wsgi.action('migrate')
def _migrate(self, req, id, body):
"""Permit admins to migrate a server to a new host."""
context = req.environ['nova.context']
authorize(context, 'migrate')
try:
instance = self.compute_api.get(context, id, want_objects=True)
self.compute_api.resize(req.environ['nova.context'], instance)
except exception.QuotaError as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'migrate')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.FlavorNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.CannotResizeToSameFlavor as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.TooManyInstances as e:
raise exc.HTTPRequestEntityTooLarge(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('reset_network')
def _reset_network(self, req, id, body):
"""Permit admins to reset networking on a server."""
context = req.environ['nova.context']
authorize(context, 'reset_network')
try:
instance = self.compute_api.get(context, id, want_objects=True)
self.compute_api.reset_network(context, instance)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('inject_network_info')
def _inject_network_info(self, req, id, body):
"""Permit admins to inject network info into a server."""
context = req.environ['nova.context']
authorize(context, 'inject_network_info')
try:
instance = self.compute_api.get(context, id, want_objects=True)
self.compute_api.inject_network_info(context, instance)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors(404)
@wsgi.action('lock')
def _lock(self, req, id, body):
"""Lock a server instance."""
context = req.environ['nova.context']
authorize(context, 'lock')
try:
instance = self.compute_api.get(context, id, want_objects=True)
self.compute_api.lock(context, instance)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors(404)
@wsgi.action('unlock')
def _unlock(self, req, id, body):
"""Unlock a server instance."""
context = req.environ['nova.context']
authorize(context, 'unlock')
try:
instance = self.compute_api.get(context, id, want_objects=True)
self.compute_api.unlock(context, instance)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((400, 404, 409, 413))
@wsgi.action('create_backup')
def _create_backup(self, req, id, body):
"""Backup a server instance.
Images now have an `image_type` associated with them, which can be
'snapshot' or the backup type, like 'daily' or 'weekly'.
If the image_type is backup-like, then the rotation factor can be
included and that will cause the oldest backups that exceed the
rotation factor to be deleted.
"""
context = req.environ["nova.context"]
authorize(context, 'create_backup')
entity = body["create_backup"]
try:
image_name = entity["name"]
backup_type = entity["backup_type"]
rotation = entity["rotation"]
except KeyError as missing_key:
msg = _("create_backup entity requires %s attribute") % missing_key
raise exc.HTTPBadRequest(explanation=msg)
except TypeError:
msg = _("Malformed create_backup entity")
raise exc.HTTPBadRequest(explanation=msg)
try:
rotation = int(rotation)
except ValueError:
msg = _("create_backup attribute 'rotation' must be an integer")
raise exc.HTTPBadRequest(explanation=msg)
if rotation < 0:
msg = _("create_backup attribute 'rotation' must be greater "
"than or equal to zero")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
try:
instance = self.compute_api.get(context, id, want_objects=True)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
try:
image = self.compute_api.backup(context, instance, image_name,
backup_type, rotation, extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'create_backup')
resp = webob.Response(status_int=202)
# build location of newly-created image entity if rotation is not zero
if rotation > 0:
image_id = str(image['id'])
image_ref = os.path.join(req.application_url, 'images', image_id)
resp.headers['Location'] = image_ref
return resp
@extensions.expected_errors((400, 404, 409))
@wsgi.action('migrate_live')
def _migrate_live(self, req, id, body):
"""Permit admins to (live) migrate a server to a new host."""
context = req.environ["nova.context"]
authorize(context, 'migrate_live')
try:
block_migration = body["migrate_live"]["block_migration"]
disk_over_commit = body["migrate_live"]["disk_over_commit"]
host = body["migrate_live"]["host"]
except (TypeError, KeyError):
msg = _("host, block_migration and disk_over_commit must "
"be specified for live migration.")
raise exc.HTTPBadRequest(explanation=msg)
try:
block_migration = strutils.bool_from_string(block_migration,
strict=True)
disk_over_commit = strutils.bool_from_string(disk_over_commit,
strict=True)
except ValueError as err:
raise exc.HTTPBadRequest(explanation=str(err))
try:
instance = self.compute_api.get(context, id, want_objects=True)
self.compute_api.live_migrate(context, instance, block_migration,
disk_over_commit, host)
except (exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.NoValidHost,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.MigrationPreCheckError) as ex:
raise exc.HTTPBadRequest(explanation=ex.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'migrate_live')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((400, 404))
@wsgi.action('reset_state')
def _reset_state(self, req, id, body):
"""Permit admins to reset the state of a server."""
context = req.environ["nova.context"]
authorize(context, 'reset_state')
# Identify the desired state from the body
try:
state = state_map[body["reset_state"]["state"]]
except (TypeError, KeyError):
msg = _("Desired state must be specified. Valid states "
"are: %s") % ', '.join(sorted(state_map.keys()))
raise exc.HTTPBadRequest(explanation=msg)
try:
instance = self.compute_api.get(context, id, want_objects=True)
instance.vm_state = state
instance.task_state = None
instance.save(admin_state_reset=True)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
class AdminActions(extensions.V3APIExtensionBase):
"""Enable admin-only server actions
Actions include: pause, unpause, suspend, resume, migrate,
reset_network, inject_network_info, lock, unlock, create_backup
"""
name = "AdminActions"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/%s/api/v3" % ALIAS
version = 1
def get_controller_extensions(self):
controller = AdminActionsController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Geometric distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class Geometric(distribution.Distribution):
"""Geometric distribution.
The Geometric distribution is parameterized by p, the probability of a
positive event. It represents the probability that in k + 1 Bernoulli trials,
the first k trials fail before a success is seen.
#### Mathematical Details
The pmf of this distribution is:
```none
pmf(k; p) = (1 - p)**k * p
```
where:
* `p` is the success probability, `0 < p <= 1`, and,
* `k` is a non-negative integer.
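#### Examples
An illustrative usage sketch (not from the original docstring); `prob` and
`sample` are inherited from the `Distribution` base class:
```python
dist = Geometric(probs=[0.2, 0.5])
dist.prob(3.)     # pmf evaluated at k = 3 for each batch member
dist.sample(10)   # ten draws per batch member
```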
"""
def __init__(self,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="Geometric"):
"""Construct Geometric distributions.
Args:
logits: Floating-point `Tensor` with shape `[B1, ..., Bb]` where `b >= 0`
indicates the number of batch dimensions. Each entry represents logits
for the probability of success for independent Geometric distributions
and must be in the range `(-inf, inf]`. Only one of `logits` or `probs`
should be specified.
probs: Positive floating-point `Tensor` with shape `[B1, ..., Bb]`
where `b >= 0` indicates the number of batch dimensions. Each entry
represents the probability of success for independent Geometric
distributions and must be in the range `(0, 1]`. Only one of `logits`
or `probs` should be specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[logits, probs]):
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits, probs, validate_args=validate_args, name=name)
with ops.control_dependencies(
[check_ops.assert_positive(self._probs)] if validate_args else []):
self._probs = array_ops.identity(self._probs, name="probs")
super(Geometric, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._probs, self._logits],
name=name)
@property
def logits(self):
"""Log-odds of a `1` outcome (vs `0`)."""
return self._logits
@property
def probs(self):
"""Probability of a `1` outcome (vs `0`)."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._probs)
def _batch_shape(self):
return self.probs.get_shape()
def _event_shape_tensor(self):
return array_ops.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
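# Inverse-CDF note (added for clarity): if U ~ Uniform(0, 1), then
# floor(log(U) / log(1 - p)) has the Geometric(p) law used here, counting the
# number of failures before the first success, since
# P(floor(log(U)/log(1-p)) >= k) = P(U <= (1-p)**k) = (1-p)**k.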
sampled = random_ops.random_uniform(
array_ops.concat([[n], array_ops.shape(self._probs)], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
seed=seed,
dtype=self.dtype)
return math_ops.floor(
math_ops.log(sampled) / math_ops.log1p(-self.probs))
def _cdf(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
else:
# Whether or not x is integer-form, the following is well-defined.
# However, scipy takes the floor, so we do too.
x = math_ops.floor(x)
x *= array_ops.ones_like(self.probs)
return array_ops.where(
x < 0.,
array_ops.zeros_like(x),
-math_ops.expm1((1. + x) * math_ops.log1p(-self.probs)))
def _log_prob(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
else:
# For consistency with cdf, we take the floor.
x = math_ops.floor(x)
x *= array_ops.ones_like(self.probs)
probs = self.probs * array_ops.ones_like(x)
safe_domain = array_ops.where(
math_ops.equal(x, 0.),
array_ops.zeros_like(probs),
probs)
return x * math_ops.log1p(-safe_domain) + math_ops.log(probs)
def _entropy(self):
probs = self._probs
if self.validate_args:
probs = control_flow_ops.with_dependencies(
[check_ops.assert_less(
probs,
constant_op.constant(1., probs.dtype),
message="Entropy is undefined when logits = inf or probs = 1.")],
probs)
# Claim: entropy(p) = softplus(s)/p - s
# where s=logits and p=probs.
#
# Proof:
#
# entropy(p)
# := -[(1-p)log(1-p) + plog(p)]/p
# = -[log(1-p) + plog(p/(1-p))]/p
# = -[-softplus(s) + ps]/p
# = softplus(s)/p - s
#
# since,
# log[1-sigmoid(s)]
# = log[1/(1+exp(s))]
# = -log[1+exp(s)]
# = -softplus(s)
#
# using the fact that,
# 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
return nn.softplus(self.logits) / probs - self.logits
def _mean(self):
return math_ops.exp(-self.logits)
def _variance(self):
return self._mean() / self.probs
def _mode(self):
return array_ops.zeros(self.batch_shape_tensor(), dtype=self.dtype)
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
import pytest
from mne.datasets import testing
from mne import (read_label, read_forward_solution, pick_types_forward,
convert_forward_solution)
from mne.label import Label
from mne.simulation.source import simulate_stc, simulate_sparse_stc
from mne.utils import run_tests_if_main
data_path = testing.data_path(download=False)
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
label_names = ['Aud-lh', 'Aud-rh', 'Vis-rh']
label_names_single_hemi = ['Aud-rh', 'Vis-rh']
subjects_dir = op.join(data_path, 'subjects')
def read_forward_solution_meg(*args, **kwargs):
"""Read forward MEG."""
fwd = read_forward_solution(*args)
fwd = convert_forward_solution(fwd, **kwargs)
fwd = pick_types_forward(fwd, meg=True, eeg=False)
return fwd
@testing.requires_testing_data
def test_simulate_stc():
"""Test generation of source estimate."""
fwd = read_forward_solution_meg(fname_fwd, force_fixed=True, use_cps=True)
labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
'%s.label' % label)) for label in label_names]
mylabels = []
for i, label in enumerate(labels):
new_label = Label(vertices=label.vertices,
pos=label.pos,
values=2 * i * np.ones(len(label.values)),
hemi=label.hemi,
comment=label.comment)
mylabels.append(new_label)
n_times = 10
tmin = 0
tstep = 1e-3
stc_data = np.ones((len(labels), n_times))
stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
assert_equal(stc.subject, 'sample')
for label in labels:
if label.hemi == 'lh':
hemi_idx = 0
else:
hemi_idx = 1
idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
idx = np.searchsorted(stc.vertices[hemi_idx], idx)
if hemi_idx == 1:
idx += len(stc.vertices[0])
assert (np.all(stc.data[idx] == 1.0))
assert (stc.data[idx].shape[1] == n_times)
# test with function
def fun(x):
return x ** 2
stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
# the first label has value 0, the second value 2, the third value 4
for i, label in enumerate(labels):
if label.hemi == 'lh':
hemi_idx = 0
else:
hemi_idx = 1
idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
idx = np.searchsorted(stc.vertices[hemi_idx], idx)
if hemi_idx == 1:
idx += len(stc.vertices[0])
res = ((2. * i) ** 2.) * np.ones((len(idx), n_times))
assert_array_almost_equal(stc.data[idx], res)
# degenerate conditions
label_subset = mylabels[:2]
data_subset = stc_data[:2]
stc = simulate_stc(fwd['src'], label_subset, data_subset, tmin, tstep, fun)
pytest.raises(ValueError, simulate_stc, fwd['src'],
label_subset, data_subset[:-1], tmin, tstep, fun)
pytest.raises(RuntimeError, simulate_stc, fwd['src'], label_subset * 2,
np.concatenate([data_subset] * 2, axis=0), tmin, tstep, fun)
@testing.requires_testing_data
def test_simulate_sparse_stc():
"""Test generation of sparse source estimate."""
fwd = read_forward_solution_meg(fname_fwd, force_fixed=True, use_cps=True)
labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
'%s.label' % label)) for label in label_names]
n_times = 10
tmin = 0
tstep = 1e-3
times = np.arange(n_times, dtype=np.float) * tstep + tmin
pytest.raises(ValueError, simulate_sparse_stc, fwd['src'], len(labels),
times, labels=labels, location='center', subject='sample',
subjects_dir=subjects_dir) # no non-zero values
for label in labels:
label.values.fill(1.)
for location in ('random', 'center'):
random_state = 0 if location == 'random' else None
stc_1 = simulate_sparse_stc(fwd['src'], len(labels), times,
labels=labels, random_state=random_state,
location=location,
subjects_dir=subjects_dir)
assert_equal(stc_1.subject, 'sample')
assert (stc_1.data.shape[0] == len(labels))
assert (stc_1.data.shape[1] == n_times)
# make sure we get the same result when using the same seed
stc_2 = simulate_sparse_stc(fwd['src'], len(labels), times,
labels=labels, random_state=random_state,
location=location,
subjects_dir=subjects_dir)
assert_array_equal(stc_1.lh_vertno, stc_2.lh_vertno)
assert_array_equal(stc_1.rh_vertno, stc_2.rh_vertno)
# Degenerate cases
pytest.raises(ValueError, simulate_sparse_stc, fwd['src'], len(labels),
times, labels=labels, location='center', subject='foo',
subjects_dir=subjects_dir) # wrong subject
del fwd['src'][0]['subject_his_id']
pytest.raises(ValueError, simulate_sparse_stc, fwd['src'], len(labels),
times, labels=labels, location='center',
subjects_dir=subjects_dir) # no subject
pytest.raises(ValueError, simulate_sparse_stc, fwd['src'], len(labels),
times, labels=labels, location='foo') # bad location
@testing.requires_testing_data
def test_generate_stc_single_hemi():
"""Test generation of source estimate, single hemi."""
fwd = read_forward_solution_meg(fname_fwd, force_fixed=True, use_cps=True)
labels_single_hemi = [read_label(op.join(data_path, 'MEG', 'sample',
'labels', '%s.label' % label))
for label in label_names_single_hemi]
mylabels = []
for i, label in enumerate(labels_single_hemi):
new_label = Label(vertices=label.vertices,
pos=label.pos,
values=2 * i * np.ones(len(label.values)),
hemi=label.hemi,
comment=label.comment)
mylabels.append(new_label)
n_times = 10
tmin = 0
tstep = 1e-3
stc_data = np.ones((len(labels_single_hemi), n_times))
stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
for label in labels_single_hemi:
if label.hemi == 'lh':
hemi_idx = 0
else:
hemi_idx = 1
idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
idx = np.searchsorted(stc.vertices[hemi_idx], idx)
if hemi_idx == 1:
idx += len(stc.vertices[0])
assert (np.all(stc.data[idx] == 1.0))
assert (stc.data[idx].shape[1] == n_times)
# test with function
def fun(x):
return x ** 2
stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
# the first label has value 0 and the second value 2
for i, label in enumerate(labels_single_hemi):
if label.hemi == 'lh':
hemi_idx = 0
else:
hemi_idx = 1
idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
idx = np.searchsorted(stc.vertices[hemi_idx], idx)
if hemi_idx == 1:
idx += len(stc.vertices[0])
res = ((2. * i) ** 2.) * np.ones((len(idx), n_times))
assert_array_almost_equal(stc.data[idx], res)
@testing.requires_testing_data
def test_simulate_sparse_stc_single_hemi():
"""Test generation of sparse source estimate."""
fwd = read_forward_solution_meg(fname_fwd, force_fixed=True, use_cps=True)
n_times = 10
tmin = 0
tstep = 1e-3
times = np.arange(n_times, dtype=np.float) * tstep + tmin
labels_single_hemi = [read_label(op.join(data_path, 'MEG', 'sample',
'labels', '%s.label' % label))
for label in label_names_single_hemi]
stc_1 = simulate_sparse_stc(fwd['src'], len(labels_single_hemi), times,
labels=labels_single_hemi, random_state=0)
assert (stc_1.data.shape[0] == len(labels_single_hemi))
assert (stc_1.data.shape[1] == n_times)
# make sure we get the same result when using the same seed
stc_2 = simulate_sparse_stc(fwd['src'], len(labels_single_hemi), times,
labels=labels_single_hemi, random_state=0)
assert_array_equal(stc_1.lh_vertno, stc_2.lh_vertno)
assert_array_equal(stc_1.rh_vertno, stc_2.rh_vertno)
run_tests_if_main()
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF metric accumulators."""
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
from tensorflow_model_analysis.metrics import metric_util
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.utils import size_estimator
class TFMetricsAccumulator:
"""Accumulator for TF metrics.
Attributes:
inputs: Accumulated batch of inputs. The inputs are stored in a
multi-dimensional list. The first dimension is used to index the
associated output (for single-output models this will only have one item).
The second dimension is used to store the args used by the combiner. For
example the args might be a tf.Example if feeding a model or they might be
(y_true, y_pred, example_weight) for calling update_state directly.
Batching is done on the last dimension.
weights: Accumulated weights. The weights are stored in a multi-dimensional
list where the first dimension is used to index the associated output (for
single-output models this will only have one item). The second dimension
is used to store the accumulated weights for each metric associated with
the output dimension.
size_estimator: Batch size estimator.
desired_batch_size: Desired batch size.
"""
# We really want the batch size to be adaptive like it is in
# beam.BatchElements(), but there isn't an easy way to make it so. For now
# we will limit stored inputs to a max overall byte size.
# TODO(b/73789023): Figure out how to make this batch size dynamic.
_TOTAL_INPUT_BYTE_SIZE_THRESHOLD = 16 << 20 # 16MiB
_DEFAULT_DESIRED_BATCH_SIZE = 1000
__slots__ = ['_inputs', '_weights', '_size_estimator', '_desired_batch_size']
def __init__(self,
input_counts: List[int],
metric_counts: List[int],
size_estimator_fn: Callable[[Any], int],
desired_batch_size: Optional[int] = None):
"""Initializes accumulator using a list of metric counts per output.
Args:
input_counts: Number of inputs associated with each output index.
metric_counts: Number of metrics associated with each output index.
size_estimator_fn: Function to use for estimating the size of the inputs.
desired_batch_size: FOR TESTING ONLY.
"""
# Inputs have shape (num_outputs, num_metrics, num_accumulated_inputs)
self._inputs = []
# Weights have shape (num_outputs, num_metrics)
self._weights = [] # type: List[List[Optional[np.ndarray]]]
for input_count in input_counts:
self._inputs.append(tuple([] for _ in range(input_count)))
for output_metric_count in metric_counts:
self._weights.append([None] * output_metric_count)
self._size_estimator = size_estimator.SizeEstimator(
size_threshold=self._TOTAL_INPUT_BYTE_SIZE_THRESHOLD,
size_fn=size_estimator_fn)
if desired_batch_size and desired_batch_size > 0:
self._desired_batch_size = desired_batch_size
else:
self._desired_batch_size = self._DEFAULT_DESIRED_BATCH_SIZE
def len_inputs(self) -> int:
"""Returns length of inputs."""
return len(self._inputs[0][0])
def add_input(self, output_index: int, *args):
"""Adds new inputs to the lists of input args stored at output_index."""
for i, v in enumerate(args):
self._inputs[output_index][i].append(v)
if v is not None:
self._size_estimator.update(v)
def get_inputs(self, output_index: int) -> Any:
"""Returns input args for output at given offset."""
return self._inputs[output_index]
def clear_inputs(self):
"""Clears currently stored inputs."""
for output_index in range(len(self._inputs)):
for i in range(len(self._inputs[output_index])):
del self._inputs[output_index][i][:]
self._size_estimator.clear()
def add_weights(self, output_index: int, metric_index: int,
weights: np.ndarray):
"""Adds weights for metric at given metric_index and output_index."""
cur_weights = self._weights[output_index][metric_index]
if cur_weights is None:
self._weights[output_index][metric_index] = weights
else:
self._weights[output_index][metric_index] = np.add(cur_weights, weights)
def get_weights(self, output_index: int,
metric_index: int) -> Optional[np.ndarray]:
"""Gets currently stored weights for given metric_index and output_index."""
return self._weights[output_index][metric_index]
def should_flush(self) -> bool:
"""Returns true if size estimator indicates flush is needed."""
return (self.len_inputs() >= self._desired_batch_size or
self._size_estimator.should_flush())
def get_size_estimate(self) -> int:
"""Returns size estimator associated with accumulator."""
return self._size_estimator.get_estimate()
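# Illustrative usage sketch (not part of the original module): a single-output
# accumulator fed (y_true, y_pred, example_weight) triples for two metrics.
#
#   acc = TFMetricsAccumulator(
#       input_counts=[3], metric_counts=[2],
#       size_estimator_fn=_numpy_array_size_fn)
#   acc.add_input(0, y_true, y_pred, example_weight)
#   if acc.should_flush():
#       batched_args = acc.get_inputs(0)
#       # run the metrics on batched_args, record results via add_weights(),
#       # then call acc.clear_inputs()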
def _numpy_array_size_fn(array: np.ndarray) -> int:
"""Size estimator for numpy arrays."""
return array.nbytes
class TFCompilableMetricsAccumulator(TFMetricsAccumulator):
"""Accumulator for compilable TF metrics.
Attributes:
inputs: Accumulated batch of inputs. The inputs are stored in a
multi-dimensional list. The first dimension is used to index the
associated output (for single-output models this will only have one item).
The second dimension is used to store the args passed to update_state
(i.e. (y_true, y_pred, example_weight)). Batching is done on the last
dimension.
weights: Accumulated weights. The weights are stored in a multi-dimensional
list where the first dimension is used to index the associated output (for
single-output models this will only have one item). The second dimension
is used to store the accumulated weights for each metric associated with
the output dimension.
pad: True if padding needed.
last_dim: Max size of the last dimension of labels or predictions (used with
padding).
size_estimator: Batch size estimator.
desired_batch_size: Desired batch size.
"""
__slots__ = [
'_inputs', '_weights', '_pad', '_pad_to_dim', '_label_padding',
'_prediction_padding', '_size_estimator', '_desired_batch_size'
]
def __init__(self,
padding_options: Optional[config_pb2.PaddingOptions],
metric_counts: List[int],
desired_batch_size: Optional[int] = None):
"""Initializes accumulator using a list of metric counts per output."""
super().__init__(
# Input args of labels, predictions, example_weights for each output.
input_counts=[3] * len(metric_counts),
metric_counts=metric_counts,
size_estimator_fn=_numpy_array_size_fn,
desired_batch_size=desired_batch_size)
self._pad = False
if padding_options is not None:
def get_padding_value(oneof_name):
oneof = padding_options.WhichOneof(oneof_name)
return None if oneof is None else getattr(padding_options, oneof)
self._pad = True
self._label_padding = get_padding_value('label_padding')
self._prediction_padding = get_padding_value('prediction_padding')
self._pad_to_dim = 0
def add_input(self, output_index: int, label: np.ndarray,
prediction: np.ndarray, example_weight: np.ndarray):
"""Adds label, prediction, and example weight to output_index."""
super().add_input(output_index, label, prediction, example_weight)
if self._pad:
self._pad_to_dim = max(self._pad_to_dim, label.shape[-1],
prediction.shape[-1])
def get_inputs(
self, output_index: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Returns labels, predictions, and weights for output at given offset."""
labels, preds, example_weights = super().get_inputs(output_index)
if self._pad:
def pad_value(
name: str, a: np.ndarray,
configured_value: Optional[Union[float, int]]) -> Union[int, float]:
if configured_value is None:
return 0 if a.dtype.kind == 'i' else .0
if isinstance(configured_value, int) and a.dtype.kind == 'i':
return configured_value
if isinstance(configured_value, float) and a.dtype.kind == 'f':
return configured_value
raise ValueError('%s padding is configured to be %s but data is %s' %
(name, type(configured_value), a.dtype))
labels = [
metric_util.pad(l, self._pad_to_dim,
pad_value('label', l, self._label_padding))
for l in labels
]
preds = [
metric_util.pad(p, self._pad_to_dim,
pad_value('prediction', p, self._prediction_padding))
for p in preds
]
return (np.array(labels), np.array(preds), np.array(example_weights))
def clear_inputs(self):
"""Clears currently stored inputs."""
super().clear_inputs()
self._pad_to_dim = 0
import base64
import requests
import time
from pycrest import version
from pycrest.compat import bytes_, text_
from pycrest.errors import APIException, UnsupportedHTTPMethodException
from requests.adapters import HTTPAdapter
try:
from urllib.parse import urlparse, urlunparse, parse_qsl
except ImportError: # pragma: no cover
from urlparse import urlparse, urlunparse, parse_qsl
try:
from urllib.parse import quote
except ImportError: # pragma: no cover
from urllib import quote
import logging
import re
from pycrest.cache import DictCache, APICache, DummyCache
logger = logging.getLogger("pycrest.eve")
cache_re = re.compile(r'max-age=([0-9]+)')
class APIConnection(object):
def __init__(
self,
additional_headers=None,
user_agent=None,
transport_adapter=None,
**kwargs):
'''Initialises a PyCrest object
Keyword arguments:
additional_headers - a dict of additional HTTP headers that will be sent to the server
user_agent - a custom user agent
cache - an instance of an APICache object that will cache HTTP Requests.
Default is DictCache, pass cache=None to disable caching.
'''
# Set up a Requests Session
session = requests.Session()
if additional_headers is None:
additional_headers = {}
if user_agent is None:
user_agent = "PyCrest/{0} +https://github.com/pycrest/PyCrest"\
.format(version)
if isinstance(transport_adapter, HTTPAdapter):
session.mount('http://', transport_adapter)
session.mount('https://', transport_adapter)
session.headers.update({
"User-Agent": user_agent,
"Accept": "application/json",
})
session.headers.update(additional_headers)
self._session = session
if 'cache' not in kwargs:
self.cache = DictCache()
else:
cache = kwargs.pop('cache')
if isinstance(cache, APICache):
self.cache = cache
elif cache is None:
self.cache = DummyCache()
else:
raise ValueError('Provided cache must implement APICache')
def _parse_parameters(self, resource, params):
'''Creates a dictionary from query_string and `params`
Transforms the `?key=value&...` to a {'key': 'value'} and adds
(or overwrites if already present) the value with the dictionary in
`params`.
'''
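# Example (illustrative): resource 'https://example.com/items?page=2' with
# params={'limit': 5} yields ('https://example.com/items',
# {'page': '2', 'limit': 5}).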
# remove params from resource URI (needed for paginated stuff)
parsed_uri = urlparse(resource)
qs = parsed_uri.query
resource = urlunparse(parsed_uri._replace(query=''))
prms = {}
for tup in parse_qsl(qs):
prms[tup[0]] = tup[1]
# params supplied to self.get() override parsed params
for key in params:
prms[key] = params[key]
return resource, prms
def get(self, resource, params={}, caching=True):
logger.debug('Getting resource %s', resource)
resource, prms = self._parse_parameters(resource, params)
# check cache
key = (
resource, frozenset(
self._session.headers.items()), frozenset(
prms.items()))
cached = self.cache.get(key)
if cached and cached['expires'] > time.time():
logger.debug(
'Cache hit for resource %s (params=%s)',
resource,
prms)
return cached['payload']
elif cached:
logger.debug(
'Cache stale for resource %s (params=%s)',
resource,
prms)
self.cache.invalidate(key)
else:
logger.debug(
'Cache miss for resource %s (params=%s)', resource, prms)
logger.debug('Getting resource %s (params=%s)', resource, prms)
res = self._session.get(resource, params=prms)
if res.status_code != 200:
raise APIException(
resource,
res.status_code,
res.json()
)
ret = res.json()
# cache result only if caching = True (default)
key = (
resource, frozenset(
self._session.headers.items()), frozenset(
prms.items()))
expires = self._get_expires(res)
if expires > 0 and caching:
self.cache.put(
key, {
'expires': time.time() + expires, 'payload': ret})
return ret
# post is not idempotent so there should be no caching
def post(self, resource, data={}):
logger.debug('Posting resource %s (data=%s)', resource, data)
res = self._session.post(resource, data=data)
if res.status_code not in [200, 201]:
raise APIException(
resource,
res.status_code,
res.json()
)
return {}
# put is not idempotent so there should be no caching
def put(self, resource, data={}):
logger.debug('Putting resource %s (data=%s)', resource, data)
res = self._session.put(resource, data=data)
if res.status_code != 200:
raise APIException(
resource,
res.status_code,
res.json()
)
return {}
# delete is not idempotent so there should be no caching
def delete(self, resource):
logger.debug('Deleting resource %s', resource)
res = self._session.delete(resource)
if res.status_code != 200:
raise APIException(
resource,
res.status_code,
res.json()
)
return {}
def _get_expires(self, response):
if 'Cache-Control' not in response.headers:
return 0
if any([s in response.headers['Cache-Control']
for s in ['no-cache', 'no-store']]):
return 0
match = cache_re.search(response.headers['Cache-Control'])
if match:
return int(match.group(1))
return 0
class EVE(APIConnection):
def __init__(self, **kwargs):
self.api_key = kwargs.pop('api_key', None)
self.client_id = kwargs.pop('client_id', None)
self.redirect_uri = kwargs.pop('redirect_uri', None)
if kwargs.pop('testing', False):
self._endpoint = "https://api-sisi.testeveonline.com/"
self._image_server = "https://image.testeveonline.com/"
self._oauth_endpoint = "https://sisilogin.testeveonline.com/oauth"
else:
self._endpoint = "https://crest-tq.eveonline.com/"
self._image_server = "https://imageserver.eveonline.com/"
self._oauth_endpoint = "https://login.eveonline.com/oauth"
self._cache = {}
self._data = None
APIConnection.__init__(self, **kwargs)
def __call__(self, caching=True):
if not self._data:
self._data = APIObject(self.get(self._endpoint,
caching=caching),
self)
return self._data
def __getattr__(self, item):
return self._data.__getattr__(item)
def auth_uri(self, scopes=None, state=None):
s = [] if not scopes else scopes
return "%s/authorize?response_type=code&redirect_uri=%s&client_id=%s%s%s" % (
self._oauth_endpoint,
quote(self.redirect_uri, safe=''),
self.client_id,
"&scope=%s" % '+'.join(s) if scopes else '',
"&state=%s" % state if state else ''
)
def _authorize(self, params):
auth = text_(
base64.b64encode(
bytes_(
"%s:%s" %
(self.client_id, self.api_key))))
headers = {"Authorization": "Basic %s" % auth}
resource = "%s/token" % self._oauth_endpoint
res = self._session.post(
resource,
params=params,
headers=headers)
if res.status_code != 200:
raise APIException(
resource,
res.status_code,
res.json()
)
return res.json()
def authorize(self, code):
res = self._authorize(
params={
"grant_type": "authorization_code",
"code": code})
return AuthedConnection(res,
self._endpoint,
self._oauth_endpoint,
self.client_id,
self.api_key,
cache=self.cache)
def refr_authorize(self, refresh_token):
res = self._authorize(
params={
"grant_type": "refresh_token",
"refresh_token": refresh_token})
return AuthedConnection({'access_token': res['access_token'],
'refresh_token': refresh_token,
'expires_in': res['expires_in']},
self._endpoint,
self._oauth_endpoint,
self.client_id,
self.api_key,
cache=self.cache)
def temptoken_authorize(self, access_token, expires_in, refresh_token):
return AuthedConnection({'access_token': access_token,
'refresh_token': refresh_token,
'expires_in': expires_in},
self._endpoint,
self._oauth_endpoint,
self.client_id,
self.api_key,
cache=self.cache)
class AuthedConnection(EVE):
def __init__(
self,
res,
endpoint,
oauth_endpoint,
client_id=None,
api_key=None,
**kwargs):
EVE.__init__(self, **kwargs)
self.client_id = client_id
self.api_key = api_key
self.token = res['access_token']
self.refresh_token = res['refresh_token']
self.expires = int(time.time()) + res['expires_in']
self._oauth_endpoint = oauth_endpoint
self._endpoint = endpoint
self._session.headers.update(
{"Authorization": "Bearer %s" % self.token})
def __call__(self, caching=True):
if not self._data:
self._data = APIObject(self.get(self._endpoint, caching=caching), self)
return self._data
def whoami(self):
if 'whoami' not in self._cache:
self._cache['whoami'] = self.get(
"%s/verify" %
self._oauth_endpoint)
return self._cache['whoami']
def refresh(self):
res = self._authorize(
params={
"grant_type": "refresh_token",
"refresh_token": self.refresh_token})
self.token = res['access_token']
self.expires = int(time.time()) + res['expires_in']
self._session.headers.update(
{"Authorization": "Bearer %s" % self.token})
return self # for backwards compatibility
def get(self, resource, params={}, caching=True):
if int(time.time()) >= self.expires:
self.refresh()
return super(self.__class__, self).get(resource, params, caching)
class APIObject(object):
def __init__(self, parent, connection):
self._dict = {}
self.connection = connection
for k, v in parent.items():
if isinstance(v, dict):
self._dict[k] = APIObject(v, connection)
elif isinstance(v, list):
self._dict[k] = self._wrap_list(v)
else:
self._dict[k] = v
def _wrap_list(self, list_):
new = []
for item in list_:
if isinstance(item, dict):
new.append(APIObject(item, self.connection))
elif isinstance(item, list):
new.append(self._wrap_list(item))
else:
new.append(item)
return new
def __getattr__(self, item):
if item in self._dict:
return self._dict[item]
raise AttributeError(item)
def __call__(self, **kwargs):
"""carries out a CREST request
__call__ takes two keyword parameters: method and data
method contains the http request method and defaults to 'get'
but could also be 'post', 'put', or 'delete'
data contains any arguments that will be passed with the request -
it could be a dictionary which contains parameters
and is passed via the url for 'get' requests and as form-encoded
data for 'post' or 'put' requests. It could also be a string if
another format of data (e.g. via json.dumps()) must be passed in
a 'post' or 'put' request. This parameter has no effect on
'delete' requests.
"""
# Caching is now handled by APIConnection
if 'href' in self._dict:
method = kwargs.pop('method', 'get') # default to get: historic behaviour
data = kwargs.pop('data', {})
caching = kwargs.pop('caching', True) # default caching to true, for get requests
# retain compatibility with historic method of passing parameters.
# Slightly unsatisfactory - what if data is dict-like but not a dict?
if isinstance(data, dict):
for arg in kwargs:
data[arg] = kwargs[arg]
if method == 'post':
return APIObject(self.connection.post(self._dict['href'], data=data), self.connection)
elif method == 'put':
return APIObject(self.connection.put(self._dict['href'], data=data), self.connection)
elif method == 'delete':
return APIObject(self.connection.delete(self._dict['href']), self.connection)
elif method == 'get':
return APIObject(self.connection.get(self._dict['href'],
params=data,
caching=caching),
self.connection)
else:
raise UnsupportedHTTPMethodException(method)
else:
return self
def __str__(self): # pragma: no cover
return self._dict.__str__()
def __repr__(self): # pragma: no cover
return self._dict.__repr__()
import traceback
import urllib3
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
from datetime import datetime
# Disable insecure warnings
urllib3.disable_warnings()
class DetectionRatio:
malicious = 0
total = 0
def __init__(self, last_analysis_stats: dict):
self.malicious = last_analysis_stats['malicious']
self.total = last_analysis_stats['harmless'] + \
last_analysis_stats['suspicious'] + \
last_analysis_stats['undetected'] + \
last_analysis_stats['malicious']
def __repr__(self):
return f'{self.malicious}/{self.total}'
class Client(BaseClient):
def get_api_indicators(self,
query_filter: Optional[str] = None,
limit: Optional[int] = 10):
return self._http_request(
'GET',
'intelligence/hunting_notification_files',
params=assign_params(
filter=query_filter,
limit=min(limit, 40),
)
)
def fetch_indicators(self,
limit: Optional[int] = 10,
filter_tag: Optional[str] = None,
fetch_command: bool = False) -> List:
"""Retrieves all entries from the feed.
Returns:
A list of objects, containing the indicators.
"""
result = []
query_filter = ''
if isinstance(filter_tag, str):
query_filter = f'tag:"{filter_tag}"'
if fetch_command:
if last_run := self.get_last_run():
query_filter = f'{query_filter} {last_run}'
response = self.get_api_indicators(query_filter.strip(), limit)
try:
for indicator in response.get('data', []):
result.append({
'data': indicator,
'type': 'file',
'FeedURL': self._base_url
})
except ValueError as err:
demisto.debug(str(err))
raise ValueError(f'Could not parse returned data as indicator. \n\nError message: {err}')
if fetch_command:
self.set_last_run()
return result
@staticmethod
def set_last_run():
"""
Returns: Current timestamp
"""
current_time = datetime.now()
current_timestamp = datetime.timestamp(current_time)
timestamp = str(int(current_timestamp))
demisto.setIntegrationContext({'last_run': timestamp})
@staticmethod
def get_last_run() -> str:
""" Gets last run time in timestamp
Returns:
last run in timestamp, or '' if no last run
"""
if last_run := demisto.getIntegrationContext().get('last_run'):
demisto.info(f'get last_run: {last_run}')
params = f'date:{last_run}+'
else:
params = ''
return params
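# Illustrative combined filter (not from the original source): with
# filter_tag='my_ruleset' and a stored last run of 1650000000,
# fetch_indicators() queries the API with 'tag:"my_ruleset" date:1650000000+'.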
def test_module(client: Client, args: dict) -> str:
try:
client.fetch_indicators()
except Exception:
raise Exception("Could not fetch VT livehunt Feed\n"
"\nCheck your API key and your connection to VirusTotal.")
return 'ok'
def fetch_indicators_command(client: Client,
tlp_color: Optional[str] = None,
feed_tags: List = [],
limit: Optional[int] = 10,
filter_tag: Optional[str] = None,
fetch_command: bool = False) -> List[Dict]:
"""Retrieves indicators from the feed
Args:
client (Client): Client object with request
tlp_color (str): Traffic Light Protocol color
feed_tags (list): tags to assign fetched indicators
limit (int): limit the results
filter_tag (string): filter response by ruleset name
fetch_command (bool): when True, apply and update the stored last-run timestamp
Returns:
Indicators.
"""
indicators = []
raw_indicators = client.fetch_indicators(limit, filter_tag,
fetch_command=fetch_command)
# extract values from iterator
for item in raw_indicators:
value_ = item.get('data')
type_ = FeedIndicatorType.File
attributes = value_.get('attributes', {})
context_attributes = value_.get('context_attributes', {})
raw_data = {
'value': value_,
'type': type_,
}
detection_ratio = DetectionRatio(attributes.get('last_analysis_stats'))
# Create indicator object for each value.
# The object consists of a dictionary with required and optional keys
# and values, as described below.
indicator_obj = {
# The indicator value.
'value': attributes['sha256'],
# The indicator type as defined in Cortex XSOAR.
# One can use the FeedIndicatorType class under CommonServerPython
# to populate this field.
'type': type_,
# The name of the service supplying this feed.
'service': 'VirusTotal',
# A dictionary that maps values to existing indicator fields defined
# in Cortex XSOAR.
# One can use this section in order to map custom indicator fields
# previously defined
# in Cortex XSOAR to their values.
'fields': {
'md5': attributes.get('md5'),
'sha1': attributes.get('sha1'),
'sha256': attributes.get('sha256'),
'ssdeep': attributes.get('ssdeep'),
'fileextension': attributes.get('type_extension'),
'filetype': attributes.get('type_tag'),
'imphash': attributes.get('pe_info', {}).get('imphash'),
'firstseenbysource': attributes.get('first_submission_date'),
'lastseenbysource': attributes.get('last_submission_date'),
'creationdate': attributes.get('creation_date'),
'updateddate': attributes.get('last_modification_date'),
'detectionengines': detection_ratio.total,
'positivedetections': detection_ratio.malicious,
'displayname': attributes.get('meaningful_name'),
'name': attributes.get('meaningful_name'),
'size': attributes.get('size'),
},
# A dictionary of the raw data returned from the feed source about
# the indicator.
'rawJSON': raw_data,
'sha256': attributes['sha256'],
'detections': str(detection_ratio),
'fileType': attributes.get('type_description'),
'rulesetName': context_attributes.get('ruleset_name'),
'ruleName': context_attributes.get('rule_name'),
}
if feed_tags:
indicator_obj['fields']['tags'] = feed_tags
if tlp_color:
indicator_obj['fields']['trafficlightprotocol'] = tlp_color
indicators.append(indicator_obj)
return indicators
def get_indicators_command(client: Client,
params: Dict[str, str],
args: Dict[str, str]
) -> CommandResults:
"""Wrapper for retrieving indicators from the feed to the war-room.
Args:
client: Client object with request
params: demisto.params()
args: demisto.args()
Returns:
Outputs.
"""
limit = int(args.get('limit', 10))
filter_tag = args.get('filter')
tlp_color = params.get('tlp_color')
feed_tags = argToList(params.get('feedTags', ''))
indicators = fetch_indicators_command(client, tlp_color,
feed_tags, limit, filter_tag)
human_readable = tableToMarkdown('Indicators from VirusTotal Livehunt Feed:',
indicators,
headers=['sha256',
'detections',
'fileType',
'rulesetName',
'ruleName'],
headerTransform=string_to_table_header,
removeNull=True)
return CommandResults(
readable_output=human_readable,
outputs_prefix='',
outputs_key_field='',
raw_response=indicators,
outputs={},
)
def reset_last_run():
"""
Reset the last run from the integration context
"""
demisto.setIntegrationContext({})
return CommandResults(readable_output='Fetch history deleted successfully')
def main():
"""
main function, parses params and runs command functions
"""
params = demisto.params()
feed_tags = argToList(params.get('feedTags'))
tlp_color = params.get('tlp_color')
limit = int(params.get('limit', 10))
filter_tag = params.get('filter')
# If your Client class inherits from BaseClient, SSL verification is
# handled out of the box by it, just pass ``verify_certificate`` to
# the Client constructor
insecure = not params.get('insecure', False)
# If your Client class inherits from BaseClient, system proxy is handled
# out of the box by it, just pass ``proxy`` to the Client constructor
proxy = params.get('proxy', False)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
client = Client(
base_url='https://www.virustotal.com/api/v3/',
verify=insecure,
proxy=proxy,
headers={
'x-apikey': params['credentials']['password'],
'x-tool': 'CortexVirusTotalLivehuntFeed',
}
)
if command == 'test-module':
# This is the call made when pressing the integration Test button.
return_results(test_module(client, {}))
elif command == 'vt-livehunt-get-indicators':
# This is the command that fetches a limited number of indicators
# from the feed source and displays them in the war room.
return_results(get_indicators_command(client, params, demisto.args()))
elif command == "vt-reset-fetch-indicators":
return_results(reset_last_run())
elif command == 'fetch-indicators':
# This is the command that initiates a request to the feed endpoint
# and create new indicators objects from the data fetched. If the
# integration instance is configured to fetch indicators, then this
# is the command that will be executed at the specified feed fetch
# interval.
indicators = fetch_indicators_command(client,
tlp_color,
feed_tags,
limit,
filter_tag,
fetch_command=True)
for iter_ in batch(indicators, batch_size=2000):
demisto.createIndicators(iter_)
else:
raise NotImplementedError(f'Command {command} is not implemented.')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # Print the traceback
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
#!/usr/bin/env python3
import argparse
import atexit
import json
import logging
import os
import re
import shutil
import signal
import sys
import time
from sh import rsync
from sh import uptime
# initialize logging
logging.basicConfig(
format='%(message)s',
# TODO: bump this down by default
level=logging.INFO,
)
# the format of backup directory timestamps. the time format is assumed to
# always output a string of the same length!
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
TIME_FORMAT_LENGTH = len(time.strftime(TIME_FORMAT))
CURRENT_LINK_NAME = 'current'
BACKUP_PREFIX = 'backup-'
INCOMPLETE_PREFIX = 'incomplete-'
# NOTE: this script doesn't work as intended when backing up a Linux FS to an
# NTFS drive, thanks to permissions being incompatible. rsync detects all files
# as changed, then does a full sync every time!
def get_config():
'''Parse and return the current command-line arguments.'''
parser = argparse.ArgumentParser('backup', description='''
Make an incremental system backup to a directory. The destination
directory is filled with a sequence of folders that maintains an
incremental history of all backups made.
''')
# standardize a path to an absolute, normalized path
norm = lambda path: os.path.abspath(os.path.normpath(path))
parser.add_argument('source', metavar='SOURCE', type=norm, default='/',
help='The source directory to back up.')
parser.add_argument('destination', metavar='DEST', type=norm,
help='The destination directory to create the backup history in.')
parser.add_argument('-k', '--days-to-keep', type=int, default=365,
help='The number of days to keep old backups.')
return parser.parse_args()
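# Example invocation (illustrative; assumes the script is saved as backup.py):
#   ./backup.py / /mnt/backups --days-to-keep 90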
def lock_dest(dest, name='backup.lock'):
'''
Create a lock directory in the destination directory. Raises an IOError if the
lock could not be acquired, i.e. the destination directory is already locked.
If the directory was locked before system boot, the directory is re-locked for
this run, since the prior process couldn't still be running after a shutdown!
Returns a function that removes the lock directory when called.
'''
# attempt to make a lock directory
lock_dir = os.path.join(dest, name)
info_path = os.path.join(lock_dir, 'info')
# see if a lock already exists by trying to read the file for it
try:
logging.debug('Looking for existing lock directory...')
data = None
with open(info_path, 'r') as info_file:
data = json.load(info_file)
# figure out when the system booted and when the directory was locked
boot_time = time.mktime(
time.strptime(uptime(since=True).strip(), '%Y-%m-%d %H:%M:%S'))
lock_time = time.mktime(time.strptime(data['start_time'], TIME_FORMAT))
# remove the lock directory if it was created before the system booted,
# since that process couldn't possibly still be running.
if boot_time > lock_time:
logging.info('Removing old lock directory (locked on %s, booted on %s)...',
time.strftime(TIME_FORMAT, time.localtime(lock_time)),
time.strftime(TIME_FORMAT, time.localtime(boot_time)))
shutil.rmtree(lock_dir)
else:
logging.debug('Lock file exists and is still valid (locked on %s)',
time.strftime(TIME_FORMAT, time.localtime(lock_time)))
except FileNotFoundError:
# do nothing since there was presumably no existing lock directory
logging.debug('No old lock directory found')
try:
os.mkdir(lock_dir)
# write some useful info to our file, so others can see our status while
# we're running and so this program can determine if the lock has "expired",
# i.e. the system rebooted while the directory was still locked.
with open(info_path, 'w') as info_file:
json.dump({
'pid': os.getpid(),
'start_time': time.strftime(TIME_FORMAT),
}, info_file, indent=' ', sort_keys=True)
  except FileExistsError as e:
    raise IOError("Could not acquire lock in '" + dest + "'") from e
# return a function that will remove the lock directory when called
# TODO: there's probably a race condition in here somewhere... fix it!
return lambda: (
os.path.exists(info_path) and
os.path.exists(lock_dir) and
shutil.rmtree(lock_dir)
)
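# Typical usage: acquire the lock with `unlock = lock_dest(dest)`, register
# `unlock` with atexit and the relevant signal handlers, and let it run on
# exit, as done in main() below.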
def parse_backup_time(path):
'''
Parse the timestamp from a backup directory path. Returns the parsed Unix
timestamp or None if none could be parsed.
'''
try:
return time.mktime(time.strptime(path[-TIME_FORMAT_LENGTH:], TIME_FORMAT))
except ValueError:
# fail if we couldn't parse a timestamp
return None
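# e.g. parse_backup_time('/mnt/backups/backup-2021-05-01T12:00:00') returns the
# corresponding Unix timestamp, while parse_backup_time('/mnt/backups/current')
# returns None because no timestamp can be parsed from its final characters.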
def remove_old_backups(dest, timestamp):
'''
Remove backup folders from `dest` that are older than `timestamp`, a Unix
timestamp.
'''
logging.info("Removing backups older than %s...",
time.strftime(TIME_FORMAT, time.localtime(timestamp)))
# keep track of how many we've removed for logging purposes
removed = 0
for path in os.listdir(dest):
# normalize the path
path = os.path.abspath(os.path.normpath(os.path.join(dest, path)))
fname = os.path.basename(path)
logging.debug(" Checking '%s'", path)
# only consider backup directories
if os.path.isdir(path) and fname.startswith(BACKUP_PREFIX):
logging.debug(" '%s' was a dir and started with '%s'",
fname, BACKUP_PREFIX)
backup_timestamp = parse_backup_time(path)
      logging.debug("      Parsed timestamp %s", backup_timestamp)
# remove the backup folder if we got a timestamp and it's too old
if backup_timestamp is None:
logging.error(" Failed to parse backup timestamp from '%s'", fname)
elif backup_timestamp - timestamp <= 0:
logging.info(" Removing '%s'", fname)
shutil.rmtree(path)
removed += 1
else:
logging.debug(" Skipping '%s'", fname)
logging.debug('')
logging.info('Removed %d old backup%s.', removed, '' if removed == 1 else 's')
def prune_incomplete_backups(dest):
'''
Removes incomplete backup folders from the given directory if a complete
backup exists that is newer than they are.
'''
newest_timestamp = None
files = [os.path.abspath(os.path.join(dest, p)) for p in os.listdir(dest)]
logging.info('Pruning incomplete backups...')
logging.debug(' Finding newest backup directory...')
for path in files:
fname = os.path.basename(path)
logging.debug(" Checking '%s'", path)
# find the newest backup directory
if os.path.isdir(path) and fname.startswith(BACKUP_PREFIX):
logging.debug(" '%s' was a dir and started with '%s'",
fname, BACKUP_PREFIX)
backup_timestamp = parse_backup_time(path)
      if backup_timestamp is None:
        logging.error("      Failed to parse backup timestamp from '%s'", fname)
      elif newest_timestamp is None:
        logging.debug("      Setting initial newest directory to '%s'", fname)
        newest_timestamp = backup_timestamp
      elif backup_timestamp > newest_timestamp:
        logging.debug("      Found newer backup directory '%s'", fname)
        newest_timestamp = backup_timestamp
else:
logging.debug(" Skipping '%s'", fname)
logging.debug("")
logging.info(" Newest backup directory time is %s",
time.strftime(TIME_FORMAT, time.localtime(newest_timestamp)))
logging.debug("")
# if we found the newest backup, remove older incomplete backups
incomplete = 0
pruned = 0
if newest_timestamp is not None:
logging.info(" Searching for old incomplete backups...")
for path in files:
fname = os.path.basename(path)
logging.debug(" Checking '%s'", path)
if os.path.isdir(path) and fname.startswith(INCOMPLETE_PREFIX):
# track that we found an incomplete backup
incomplete += 1
logging.debug(" '%s' was a dir and started with '%s'",
fname, INCOMPLETE_PREFIX)
# remove the incomplete folder if it's older than the newest backup
incomplete_timestamp = parse_backup_time(path)
        logging.debug("        Parsed timestamp %s", incomplete_timestamp)
if incomplete_timestamp is None:
logging.error(" Failed to parse backup timestamp from '%s'", fname)
elif incomplete_timestamp - newest_timestamp < 0:
logging.info(" Removing '%s'", fname)
pruned += 1
shutil.rmtree(path)
else:
logging.debug(" Skipping '%s'", fname)
logging.debug('')
else:
# this shouldn't happen, as we should have at least the current backup
logging.error(' No backup directories found!')
logging.info(' Found %d incomplete backup%s',
incomplete, '' if incomplete == 1 else 's')
logging.info('Pruned %d incomplete backup%s',
pruned, '' if pruned == 1 else 's')
def main():
config = get_config()
dest = config.destination
src = config.source
# ensure the destination directory exists
os.makedirs(dest, exist_ok=True)
# lock it so only we can use it
unlock_dest = None
try:
unlock_dest = lock_dest(dest)
except IOError as e:
logging.info('Backup already in progress, exiting.')
return 0
# remove the lock when exiting under normal circumstances
atexit.register(unlock_dest)
# make sure we remove the lock on exit, now that we've acquired it. we catch
# these signals explicitly since it virtually guarantees that we'll remove the
# lock on exit, unless something catastrophic happens. we have to wrap the
# function since handler functions must take two arguments, otherwise they
# error.
unlock_dest_handler = lambda a, b: unlock_dest()
signal.signal(signal.SIGABRT, unlock_dest_handler)
signal.signal(signal.SIGINT, unlock_dest_handler)
signal.signal(signal.SIGSEGV, unlock_dest_handler)
signal.signal(signal.SIGTERM, unlock_dest_handler)
# get a timestamp for the backup directory
backup_timestamp = time.strftime(TIME_FORMAT)
# get names for our backup directories
incomplete_backup_dir = os.path.join(dest,
INCOMPLETE_PREFIX + BACKUP_PREFIX + backup_timestamp)
complete_backup_dir = os.path.join(dest, BACKUP_PREFIX + backup_timestamp)
current_link = os.path.join(dest, CURRENT_LINK_NAME)
logging.info("Backing up '%s' to '%s'...", src, dest)
# start the backup
rsync_result = rsync(
'--exclude', '/dev/*',
'--exclude', '/home/*/.cache',
'--exclude', '/home/*/.config/google-chrome-*/Default/File System/*',
'--exclude', '/home/*/.config/google-chrome-*/Default/GPUCache/*',
'--exclude', '/home/*/.dropbox/logs/*',
'--exclude', '/home/*/.local/share/Trash',
'--exclude', '/home/*/.mozilla/firefox/*/Cache',
'--exclude', '/home/*/.thumbnails',
'--exclude', '/mnt/*',
'--exclude', '/proc/*',
'--exclude', '/run/*',
'--exclude', '/sys/*',
'--exclude', '/tmp/*',
'--exclude', '/var/lock/*',
'--exclude', '/var/log/journal/*',
'--exclude', '/var/run/*',
'--exclude', '/var/tmp/*',
'--exclude', dest,
'--include', '/home',
# backup from the source to our 'incomplete' directory
src, incomplete_backup_dir,
# this does the incremental magic
link_dest=current_link,
# prettify output a bit
itemize_changes=True,
human_readable=True,
# look through all subdirectories of the given one
recursive=True,
# include all file types and duplicate all permissions
links=True,
perms=True,
times=True,
group=True,
owner=True,
devices=True,
specials=True,
executability=True,
# log all rsync output through our logger
_out=logging.info,
_err=logging.error,
)
# bail if the backup didn't succeed
if rsync_result.exit_code != 0:
logging.error('rsync process exited with code %d, backup failed!',
rsync_result.exit_code)
return rsync_result.exit_code
else:
logging.info('Backup was successful')
# mark the backup as 'complete'
logging.info('Marking the backup as complete...')
os.rename(incomplete_backup_dir, complete_backup_dir)
# remove any existing symlink and create a new one
logging.info('Updating link to point at the current backup...')
current_link_path = os.path.join(dest, CURRENT_LINK_NAME)
if os.path.lexists(current_link_path):
logging.debug("Removing existing link at '%s'", current_link_path)
os.unlink(current_link_path)
# makes sure the link is relative, so we can move the backup folder without
# breaking the link.
os.symlink(os.path.basename(complete_backup_dir), current_link_path)
# remove old backup folders
keep_duration_seconds = 60 * 60 * 24 * config.days_to_keep
remove_old_backups(dest, time.time() - keep_duration_seconds)
# prune incomplete backup folders once a newer backup exists
prune_incomplete_backups(dest)
return 0
if __name__ == '__main__':
# exit with whatever code main returns, defaulting to success
sys.exit(main())
|
|
import datetime
import math
import os
from collections import namedtuple
from urllib.parse import urlparse
from django.conf import settings
from django.contrib.sitemaps import Sitemap as DjangoSitemap
from django.db.models import Count, Max, Q
from django.template import loader
from django.utils.functional import cached_property
from django.urls import reverse
from olympia import amo
from olympia.addons.models import Addon, AddonCategory
from olympia.amo.reverse import get_url_prefix, override_url_prefix
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.constants.categories import CATEGORIES
from olympia.constants.promoted import RECOMMENDED
from olympia.bandwagon.models import Collection
from olympia.promoted.models import PromotedAddon
from olympia.tags.models import AddonTag, Tag
from olympia.users.models import UserProfile
# These constants are from:
# https://github.com/mozilla/addons-frontend/blob/master/src/amo/reducers/addonsByAuthors.js
EXTENSIONS_BY_AUTHORS_PAGE_SIZE = 10
THEMES_BY_AUTHORS_PAGE_SIZE = 12
# top 10 locales by visitor from GA (as of May 2021)
FRONTEND_LANGUAGES = [
'de',
'en-GB',
'en-US',
'es',
'fr',
'ja',
'pl',
'pt-BR',
'ru',
'zh-CN',
]
class LazyTupleList:
"""Lazily emulates a generated list like:
[
(item_a, item_b)
for item_b in list_b
for item_a in list_a
]
"""
def __init__(self, list_a, list_b):
self.list_a = list_a
self.list_b = list_b
def __len__(self):
return len(self.list_a) * len(self.list_b)
def __getitem__(self, key):
a_len = len(self.list_a)
def get(index):
return (self.list_a[index % a_len], self.list_b[index // a_len])
return (
[get(idx) for idx in range(key.start, key.stop, key.step or 1)]
if isinstance(key, slice)
else get(key)
)
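    # For example, LazyTupleList(['a', 'b'], [1, 2]) behaves like the list
    # [('a', 1), ('b', 1), ('a', 2), ('b', 2)]: len() is 4 and each tuple is
    # computed on demand instead of materializing the full cross product.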
class Sitemap(DjangoSitemap):
limit = 2000
i18n = True
languages = FRONTEND_LANGUAGES
alternates = True
# x_default = False # TODO: enable this when we can validate it works well
_cached_items = []
protocol = urlparse(settings.EXTERNAL_SITE_URL).scheme
def _location(self, item, force_lang_code=None):
# modified from Django implementation - we don't rely on locale for urls
if self.i18n:
obj, lang_code = item
# Doing .replace is hacky, but `override_url_prefix` is slow at scale
return self.location(obj).replace(
settings.LANGUAGE_CODE, force_lang_code or lang_code, 1
)
return self.location(item)
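        # e.g. with settings.LANGUAGE_CODE == 'en-US', the .replace() above
        # turns '/en-US/firefox/addon/foo/' into '/fr/firefox/addon/foo/'
        # for lang_code 'fr'.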
def _items(self):
items = self.items()
if self.i18n:
# Create (item, lang_code) tuples for all items and languages.
# This is necessary to paginate with all languages already considered.
return LazyTupleList(items, self._languages())
return items
def items(self):
return self._cached_items
def get_domain(self, site):
if not site:
if not hasattr(self, 'domain'):
self.domain = urlparse(settings.EXTERNAL_SITE_URL).netloc
return self.domain
return super().get_domain(site=site)
def get_urls(self, page=1, site=None, protocol=None, *, app_name=None):
with override_url_prefix(app_name=app_name):
return super().get_urls(page=page, site=site, protocol=protocol)
@cached_property
def template(self):
return loader.get_template('sitemap.xml')
def render(self, app_name, page):
context = {'urlset': self.get_urls(page=page, app_name=app_name)}
return self.template.render(context)
@property
def _current_app(self):
return amo.APPS[get_url_prefix().app]
def get_android_promoted_addons():
return PromotedAddon.objects.filter(
Q(application_id=amo.ANDROID.id) | Q(application_id__isnull=True),
group_id=RECOMMENDED.id,
addon___current_version__promoted_approvals__application_id=(amo.ANDROID.id),
addon___current_version__promoted_approvals__group_id=RECOMMENDED.id,
)
class AddonSitemap(Sitemap):
item_tuple = namedtuple('Item', ['last_updated', 'url', 'page'], defaults=(1,))
@cached_property
def _cached_items(self):
current_app = self._current_app
addons_qs = Addon.objects.public().filter(
_current_version__apps__application=current_app.id
)
# android is currently limited to a small number of recommended addons, so get
# the list of those and filter further
if current_app == amo.ANDROID:
promoted_addon_ids = get_android_promoted_addons().values_list(
'addon_id', flat=True
)
addons_qs = addons_qs.filter(id__in=promoted_addon_ids)
addons = list(
addons_qs.order_by('-last_updated')
.values_list(
'last_updated',
'slug',
'text_ratings_count',
named=True,
)
.iterator()
)
items = [
self.item_tuple(
addon.last_updated,
reverse('addons.detail', args=[addon.slug]),
)
for addon in addons
]
# add pages for ratings - and extra pages when needed to paginate
page_size = settings.REST_FRAMEWORK['PAGE_SIZE']
for addon in addons:
pages_needed = math.ceil((addon.text_ratings_count or 1) / page_size)
items.extend(
self.item_tuple(
addon.last_updated,
reverse('addons.ratings.list', args=[addon.slug]),
page,
)
for page in range(1, pages_needed + 1)
)
return items
def lastmod(self, item):
return item.last_updated
def location(self, item):
return item.url + (f'?page={item.page}' if item.page > 1 else '')
class AMOSitemap(Sitemap):
lastmod = datetime.datetime.now()
_cached_items = [
# frontend pages
('home', amo.FIREFOX),
('home', amo.ANDROID),
('pages.about', None),
('pages.review_guide', None),
('browse.extensions', amo.FIREFOX),
('browse.themes', amo.FIREFOX),
('browse.language-tools', amo.FIREFOX),
# server pages
('devhub.index', None),
('apps.appversions', amo.FIREFOX),
('apps.appversions', amo.ANDROID),
]
def location(self, item):
urlname, app = item
if app:
with override_url_prefix(app_name=app.short):
return reverse(urlname)
else:
return reverse(urlname)
class CategoriesSitemap(Sitemap):
lastmod = datetime.datetime.now()
@cached_property
def _cached_items(self):
page_size = settings.REST_FRAMEWORK['PAGE_SIZE']
page_count_max = settings.ES_MAX_RESULT_WINDOW // page_size
def additems(type):
items = []
for category in CATEGORIES[current_app.id][type].values():
items.append((category, 1))
pages_needed = min(
math.ceil(addon_counts.get(category.id, 1) / page_size),
page_count_max,
)
for page in range(2, pages_needed + 1):
items.append((category, page))
return items
current_app = self._current_app
counts_qs = (
AddonCategory.objects.filter(
addon___current_version__isnull=False,
addon___current_version__apps__application=current_app.id,
addon__disabled_by_user=False,
addon__status__in=amo.REVIEWED_STATUSES,
)
.values('category_id')
.annotate(count=Count('addon_id'))
)
addon_counts = {cat['category_id']: cat['count'] for cat in counts_qs}
items = additems(amo.ADDON_EXTENSION)
if current_app == amo.FIREFOX:
items.extend(additems(amo.ADDON_STATICTHEME))
return items
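    # e.g. with a page size of 25, a category holding 120 reviewed add-ons
    # contributes (category, 1) through (category, 5), capped at page_count_max.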
def location(self, item):
(category, page) = item
return category.get_url_path() + (f'?page={page}' if page > 1 else '')
class CollectionSitemap(Sitemap):
@cached_property
def _cached_items(self):
return list(
Collection.objects.filter(author_id=settings.TASK_USER_ID)
.order_by('-modified')
.values_list('modified', 'slug', 'author_id', named=True)
.iterator()
)
def lastmod(self, item):
return item.modified
def location(self, item):
return Collection.get_url_path(item)
class AccountSitemap(Sitemap):
item_tuple = namedtuple(
'AccountItem',
['addons_updated', 'url', 'extension_page', 'theme_page'],
defaults=(1, 1),
)
@cached_property
def _cached_items(self):
current_app = self._current_app
addon_q = Q(
addons___current_version__isnull=False,
addons___current_version__apps__application=current_app.id,
addons__disabled_by_user=False,
addons__status__in=amo.REVIEWED_STATUSES,
addonuser__listed=True,
addonuser__role__in=(amo.AUTHOR_ROLE_DEV, amo.AUTHOR_ROLE_OWNER),
)
# android is currently limited to a small number of recommended addons, so get
# the list of those and filter further
if current_app == amo.ANDROID:
promoted_addon_ids = get_android_promoted_addons().values_list(
'addon_id', flat=True
)
addon_q = addon_q & Q(addons__id__in=promoted_addon_ids)
users = (
UserProfile.objects.filter(is_public=True, deleted=False)
.annotate(
theme_count=Count(
'addons', filter=Q(addon_q, addons__type=amo.ADDON_STATICTHEME)
)
)
.annotate(
extension_count=Count(
'addons', filter=Q(addon_q, addons__type=amo.ADDON_EXTENSION)
)
)
.annotate(addons_updated=Max('addons__last_updated', filter=addon_q))
.order_by('-addons_updated', '-modified')
.values_list(
'addons_updated', 'id', 'extension_count', 'theme_count', named=True
)
.iterator()
)
items = []
for user in users:
if not user.extension_count and not user.theme_count:
                # some users have an empty page for various reasons, no need to
                # include them
continue
extension_pages_needed = math.ceil(
(user.extension_count or 1) / EXTENSIONS_BY_AUTHORS_PAGE_SIZE
)
theme_pages_needed = math.ceil(
(user.theme_count or 1) / THEMES_BY_AUTHORS_PAGE_SIZE
)
items.extend(
self.item_tuple(
user.addons_updated,
reverse('users.profile', args=[user.id]),
ext_page,
1,
)
for ext_page in range(1, extension_pages_needed + 1)
)
# start themes at 2 because we don't want (1, 1) twice
items.extend(
self.item_tuple(
user.addons_updated,
reverse('users.profile', args=[user.id]),
1,
theme_page,
)
for theme_page in range(2, theme_pages_needed + 1)
)
return items
def lastmod(self, item):
return item.addons_updated
def location(self, item):
urlargs = '&'.join(
([f'page_e={item.extension_page}'] if item.extension_page > 1 else [])
+ ([f'page_t={item.theme_page}'] if item.theme_page > 1 else [])
)
return item.url + (f'?{urlargs}' if urlargs else '')
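    # e.g. a user with three pages of extensions and two of themes yields the
    # plain profile URL plus '?page_e=2', '?page_e=3' and '?page_t=2' variants.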
class TagPagesSitemap(Sitemap):
lastmod = datetime.datetime.now()
@cached_property
def _cached_items(self):
page_size = settings.REST_FRAMEWORK['PAGE_SIZE']
page_count_max = settings.ES_MAX_RESULT_WINDOW // page_size
current_app = self._current_app
counts_qs = (
AddonTag.objects.filter(
addon___current_version__isnull=False,
addon___current_version__apps__application=current_app.id,
addon__disabled_by_user=False,
addon__status__in=amo.REVIEWED_STATUSES,
)
.values('tag_id')
.annotate(count=Count('addon_id'))
)
addon_counts = {tag['tag_id']: tag['count'] for tag in counts_qs}
items = []
for tag in Tag.objects.all():
items.append((tag, 1))
pages_needed = min(
math.ceil(addon_counts.get(tag.id, 1) / page_size),
page_count_max,
)
for page in range(2, pages_needed + 1):
items.append((tag, page))
return items
def location(self, item):
(tag, page) = item
return tag.get_url_path() + (f'?page={page}' if page > 1 else '')
def get_sitemaps():
return {
# because some urls are app-less, we specify per item, so don't specify an app
('amo', None): AMOSitemap(),
('addons', amo.FIREFOX): AddonSitemap(),
('addons', amo.ANDROID): AddonSitemap(),
# category pages aren't supported on android, so firefox only
('categories', amo.FIREFOX): CategoriesSitemap(),
# we don't expose collections on android, so firefox only
('collections', amo.FIREFOX): CollectionSitemap(),
('users', amo.FIREFOX): AccountSitemap(),
('users', amo.ANDROID): AccountSitemap(),
('tags', amo.FIREFOX): TagPagesSitemap(),
('tags', amo.ANDROID): TagPagesSitemap(),
}
OTHER_SITEMAPS = [
'/blog/sitemap.xml',
]
def get_sitemap_section_pages(sitemaps):
pages = []
for (section, app), site in sitemaps.items():
if not app:
pages.extend((section, None, page) for page in site.paginator.page_range)
continue
with override_url_prefix(app_name=app.short):
# Add all pages of the sitemap section.
pages.extend(
(section, app.short, page) for page in site.paginator.page_range
)
return pages
def render_index_xml(sitemaps):
sitemap_url = reverse('amo.sitemap')
server_urls = (
f'{sitemap_url}?section={section}'
+ (f'&app_name={app_name}' if app_name else '')
+ (f'&p={page}' if page != 1 else '')
for section, app_name, page in get_sitemap_section_pages(sitemaps)
)
urls = list(server_urls) + OTHER_SITEMAPS
return loader.render_to_string(
'sitemap_index.xml',
{'sitemaps': (absolutify(url) for url in urls)},
)
def get_sitemap_path(section, app, page=1):
return os.path.join(
settings.SITEMAP_STORAGE_PATH,
'sitemap'
+ (f'-{section}' if section else '')
+ (f'-{app}' if app else '')
+ (f'-{page}' if page != 1 else '')
+ '.xml',
)
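# For example, get_sitemap_path('addons', 'firefox', 2) resolves to
# 'sitemap-addons-firefox-2.xml' under SITEMAP_STORAGE_PATH, while
# get_sitemap_path(None, None) resolves to the index file 'sitemap.xml'.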
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import logging
import socket
from .asynclib import loop
from .codec import encode
from .constants import ENQ, EOT
from .exceptions import NotAccepted
from .mapping import Record
from .protocol import ASTMProtocol
log = logging.getLogger(__name__)
__all__ = ['Client', 'Emitter']
class RecordsStateMachine(object):
"""Simple state machine to track emitting ASTM records in right order.
:param mapping: Mapping of the ASTM records flow order.
Keys should be string and defines record type, while values
expected as sequence of other record types that may be used
after current one.
For example: ``{"H": ["P", "C", "L"]}`` mapping defines that
if previous record had ``"H"`` type, then the next one
should have ``"P"``, ``"C"`` or ``"L"`` type or
:exc:`AssertionError` will be raised. The default mapping
reflects common ASTM records flow rules. If this argument
specified as :const:`None` no rules will be applied.
:type: dict
"""
def __init__(self, mapping):
self.mapping = mapping
self.state = None
def __call__(self, state):
if state is not None:
assert self.is_acceptable(state),\
'invalid state %r, expected one of: %r' \
% (state, self.mapping[self.state])
self.state = state
def is_acceptable(self, state):
if self.mapping is None:
return True
if state not in self.mapping:
return False
next_types = self.mapping[self.state]
return '*' in next_types or state in next_types
DEFAULT_RECORDS_FLOW_MAP = {
None: ['H'],
'H': ['C', 'M', 'P', 'Q', 'L'],
'P': ['C', 'M', 'O', 'L'],
'Q': ['C', 'M', 'O', 'L'],
'O': ['C', 'M', 'P', 'O', 'R', 'L'],
'R': ['C', 'M', 'P', 'O', 'R', 'S', 'L'],
'S': ['C', 'M', 'P', 'O', 'R', 'S', 'L'],
'C': ['*'],
'M': ['*'],
'L': ['H']
}
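# With the mapping above, a RecordsStateMachine accepts the sequence
# H -> P -> O -> R -> L, while feeding it 'R' straight after 'H' raises
# AssertionError because 'R' is not listed as a valid successor of 'H'.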
class Emitter(object):
"""ASTM records emitter for :class:`Client`.
Used as wrapper for user provided one to provide proper routines around for
sending Header and Terminator records.
:param emitter: Generator/coroutine.
:param encoding: Data encoding.
:type encoding: str
:param flow_map: Records flow map. Used by :class:`RecordsStateMachine`.
:type: dict
    :param chunk_size: Chunk size in bytes. If :const:`None`, emitted records
                       won't be split into chunks.
    :type chunk_size: int
    :param bulk_mode: Sends all records for a single session (starting with
                      the Header and ending with the Terminator record) as a
                      single message instead of sending each record separately.
                      If the resulting message is too long, it may be split
                      into chunks when `chunk_size` is not :const:`None`. Keep
                      in mind that collecting all records for a single session
                      may take some time and the server may reject the data
                      due to a timeout.
:type bulk_mode: bool
"""
    #: Records state machine that controls emitting records in the right
    #: order. It receives `flow_map` as its only argument on Emitter
    #: initialization.
state_machine = RecordsStateMachine
def __init__(self, emitter, flow_map, encoding,
chunk_size=None, bulk_mode=False):
self._emitter = emitter()
self._is_active = False
self.encoding = encoding
self.records_sm = self.state_machine(flow_map)
# flag to signal that user's emitter produces no records
self.empty = False
# last sent sequence number
self.last_seq = 0
self.buffer = []
self.chunk_size = chunk_size
self.bulk_mode = bulk_mode
def _get_record(self, value=None):
record = self._emitter.send(value if self._is_active else None)
if not self._is_active:
self._is_active = True
if isinstance(record, Record):
record = record.to_astm()
try:
self.records_sm(record[0])
except Exception as err:
self.throw(type(err), err.args)
return record
def _send_record(self, record):
if self.bulk_mode:
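            # in bulk mode, drain the emitter until the Terminator ('L')
            # record and encode the whole session as a single message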
records = [record]
while True:
record = self._get_record(True)
records.append(record)
if record[0] == 'L':
break
chunks = encode(records, self.encoding, self.chunk_size)
else:
self.last_seq += 1
chunks = encode([record], self.encoding,
self.chunk_size, self.last_seq)
self.buffer.extend(chunks)
data = self.buffer.pop(0)
self.last_seq += len(self.buffer)
if record[0] == 'L':
self.last_seq = 0
self.buffer.append(EOT)
return data
def send(self, value=None):
"""Passes `value` to the emitter. Semantically acts in same way as
:meth:`send` for generators.
If the emitter has any value within local `buffer` the returned value
will be extracted from it unless `value` is :const:`False`.
:param value: Callback value. :const:`True` indicates that previous
record was successfully received and accepted by server,
:const:`False` signs about his rejection.
:type value: bool
:return: Next record data to send to server.
:rtype: bytes
"""
if self.buffer and value:
return self.buffer.pop(0)
record = self._get_record(value)
return self._send_record(record)
def throw(self, exc_type, exc_val=None, exc_tb=None):
"""Raises exception inside the emitter. Acts in same way as
:meth:`throw` for generators.
If the emitter had catch an exception and return any record value, it
will be proceeded in common way.
"""
record = self._emitter.throw(exc_type, exc_val, exc_tb)
if record is not None:
return self._send_record(record)
def close(self):
"""Closes the emitter. Acts in same way as :meth:`close` for generators.
"""
self._emitter.close()
class Client(ASTMProtocol):
"""Common ASTM client implementation.
:param emitter: Generator function that will produce ASTM records.
:type emitter: function
:param host: Server IP address or hostname.
:type host: str
:param port: Server port number.
:type port: int
    :param timeout: Time to wait for a response from the server. If no
                    response is received, :meth:`on_timeout` will be called.
                    If :const:`None`, this timer is disabled.
:type timeout: int
:param flow_map: Records flow map. Used by :class:`RecordsStateMachine`.
:type: dict
    :param chunk_size: Chunk size in bytes. A :const:`None` value disables
                       record chunking.
:type chunk_size: int
    :param bulk_mode: Sends all records for a single session (starting with
                      the Header and ending with the Terminator record) as a
                      single message instead of sending each record separately.
                      If the resulting message is too long, it may be split
                      into chunks when `chunk_size` is not :const:`None`. Keep
                      in mind that collecting all records for a single session
                      may take some time and the server may reject the data
                      due to a timeout.
:type bulk_mode: bool
    A basic `emitter` is a generator that yields ASTM records one by one,
    preserving their order::
from astm.records import (
HeaderRecord, PatientRecord, OrderRecord, TerminatorRecord
)
def emitter():
assert (yield HeaderRecord()), 'header was rejected'
ok = yield PatientRecord(name={'last': 'foo', 'first': 'bar'})
        if ok:  # you can also decide what to do if the record was rejected
assert (yield OrderRecord())
        yield TerminatorRecord()  # we may not care about rejection here
    :class:`Client`, through :class:`RecordsStateMachine`, keeps track of this
    order and raises :exc:`AssertionError` if it is broken.
    When the `emitter` terminates with a :exc:`StopIteration` or
    :exc:`GeneratorExit` exception, the client connection to the server is
    closed as well. You may provide an endless `emitter` by wrapping the
    function body in a ``while True: ...`` loop that polls data from a source
    from time to time. Note that the server may enforce communication timeouts
    and close the session after some period of inactivity, so be sure that
    you're able to send a whole session (started by a Header record and ended
    by a Terminator one) within a limited time frame (commonly 10-15 sec.).
"""
    #: Wrapper around the emitter that provides session context and the logic
    #: for sending head and tail data.
emitter_wrapper = Emitter
def __init__(self, emitter, host='localhost', port=15200,
encoding=None, timeout=20, flow_map=DEFAULT_RECORDS_FLOW_MAP,
chunk_size=None, bulk_mode=False):
super(Client, self).__init__(timeout=timeout)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((host, port))
self.emitter = self.emitter_wrapper(
emitter,
encoding=encoding or self.encoding,
flow_map=flow_map,
chunk_size=chunk_size,
bulk_mode=bulk_mode
)
self.terminator = 1
def handle_connect(self):
"""Initiates ASTM communication session."""
super(Client, self).handle_connect()
self._open_session()
def handle_close(self):
self.emitter.close()
super(Client, self).handle_close()
def _open_session(self):
self.push(ENQ)
def _close_session(self, close_connection=False):
self.push(EOT)
if close_connection:
self.close_when_done()
def run(self, timeout=1.0, *args, **kwargs):
"""Enters into the :func:`polling loop <astm.asynclib.loop>` to let
client send outgoing requests."""
loop(timeout, *args, **kwargs)
def on_enq(self):
"""Raises :class:`NotAccepted` exception."""
raise NotAccepted('Client should not receive ENQ.')
def on_ack(self):
"""Handles ACK response from server.
        Provides the callback value :const:`True` to the emitter and sends the
        next message to the server.
"""
try:
message = self.emitter.send(True)
except StopIteration:
self._close_session(True)
else:
self.push(message)
if message == EOT:
self._open_session()
def on_nak(self):
"""Handles NAK response from server.
        If it was received in reply to an ENQ request, the client tries to
        repeat the last request for the allowed number of attempts. Otherwise
        it sends the callback value :const:`False` to the emitter."""
if self._last_sent_data == ENQ:
return self.push(ENQ)
try:
message = self.emitter.send(False)
except StopIteration:
self._close_session(True)
except Exception:
self._close_session(True)
raise
else:
self.push(message)
if message == EOT:
self._open_session()
def on_eot(self):
"""Raises :class:`NotAccepted` exception."""
raise NotAccepted('Client should not receive EOT.')
def on_message(self):
"""Raises :class:`NotAccepted` exception."""
raise NotAccepted('Client should not receive ASTM message.')
def on_timeout(self):
"""Sends final EOT message and closes connection after his receiving."""
super(Client, self).on_timeout()
self._close_session(True)
|
|
from __future__ import unicode_literals
import platform
import re
import pytest
from easysnmp.exceptions import (
EasySNMPError, EasySNMPConnectionError, EasySNMPTimeoutError,
EasySNMPNoSuchObjectError, EasySNMPNoSuchInstanceError,
EasySNMPNoSuchNameError
)
from easysnmp.session import Session
from .fixtures import sess_v1, sess_v2, sess_v3
from .helpers import snmp_set_via_cli
@pytest.yield_fixture(autouse=True)
def reset_values():
snmp_set_via_cli('sysLocation.0', 'my original location', 's')
snmp_set_via_cli('nsCacheTimeout.1.3.6.1.2.1.2.2', '0', 'i')
yield
def test_session_invalid_snmp_version():
with pytest.raises(ValueError):
Session(version=4)
@pytest.mark.parametrize('version', [1, 2, 3])
def test_session_invalid_hostname(version):
with pytest.raises(EasySNMPConnectionError):
session = Session(hostname='invalid', version=version)
session.get('sysContact.0')
@pytest.mark.parametrize('version', [1, 2, 3])
def test_session_invalid_hostname_and_remote_port(version):
with pytest.raises(ValueError):
Session(hostname='localhost:162', remote_port=163, version=version)
@pytest.mark.parametrize('version', [1, 2, 3])
def test_session_hostname_and_remote_port_split(version):
session = Session(hostname='localhost:162', version=version)
assert session.hostname == 'localhost'
assert session.remote_port == 162
@pytest.mark.parametrize('version', [1, 2, 3])
def test_session_invalid_port(version):
with pytest.raises(EasySNMPTimeoutError):
session = Session(
remote_port=1234, version=version, timeout=0.2, retries=1
)
session.get('sysContact.0')
# TODO: Determine how to test this more than once without a problem
# @pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
@pytest.mark.parametrize('sess', [sess_v1()])
def test_session_set_multiple_next(sess):
success = sess.set_multiple([
('.1.3.6.1.6.3.12.1.2.1.2.116.101.115.116', '.1.3.6.1.6.1.1'),
('.1.3.6.1.6.3.12.1.2.1.3.116.101.115.116', '1234'),
('.1.3.6.1.6.3.12.1.2.1.9.116.101.115.116', 4),
])
assert success
res = sess.get_next([
'snmpTargetAddrTDomain', 'snmpTargetAddrTAddress',
'snmpTargetAddrRowStatus'
])
assert len(res) == 3
assert res[0].oid == 'snmpTargetAddrTDomain'
assert res[0].oid_index == '116.101.115.116'
assert res[0].value == '.1.3.6.1.6.1.1'
assert res[0].snmp_type == 'OBJECTID'
assert res[1].oid == 'snmpTargetAddrTAddress'
assert res[1].oid_index == '116.101.115.116'
assert res[1].value == '1234'
assert res[1].snmp_type == 'OCTETSTR'
assert res[2].oid == 'snmpTargetAddrRowStatus'
assert res[2].oid_index == '116.101.115.116'
assert res[2].value == '3'
assert res[2].snmp_type == 'INTEGER'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_set_clear(sess):
res = sess.set('.1.3.6.1.6.3.12.1.2.1.9.116.101.115.116', 6)
assert res == 1
res = sess.get_next([
'snmpTargetAddrTDomain', 'snmpTargetAddrTAddress',
'snmpTargetAddrRowStatus'
])
assert len(res) == 3
assert res[0].oid == 'snmpUnavailableContexts'
assert res[0].oid_index == '0'
assert res[0].value == '0'
assert res[0].snmp_type == 'COUNTER'
assert res[1].oid == 'snmpUnavailableContexts'
assert res[1].oid_index == '0'
assert res[1].value == '0'
assert res[1].snmp_type == 'COUNTER'
assert res[2].oid == 'snmpUnavailableContexts'
assert res[2].oid_index == '0'
assert res[2].value == '0'
assert res[2].snmp_type == 'COUNTER'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_get(sess):
res = sess.get([
('sysUpTime', '0'),
('sysContact', '0'),
('sysLocation', '0')
])
assert len(res) == 3
assert res[0].oid == 'sysUpTimeInstance'
assert res[0].oid_index == ''
assert int(res[0].value) > 0
assert res[0].snmp_type == 'TICKS'
assert res[1].oid == 'sysContact'
assert res[1].oid_index == '0'
assert res[1].value == 'G. S. Marzot <[email protected]>'
assert res[1].snmp_type == 'OCTETSTR'
assert res[2].oid == 'sysLocation'
assert res[2].oid_index == '0'
assert res[2].value == 'my original location'
assert res[2].snmp_type == 'OCTETSTR'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_get_use_numeric(sess):
sess.use_numeric = True
res = sess.get('sysContact.0')
assert res.oid == '.1.3.6.1.2.1.1.4'
assert res.oid_index == '0'
assert res.value == 'G. S. Marzot <[email protected]>'
assert res.snmp_type == 'OCTETSTR'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_get_use_sprint_value(sess):
sess.use_sprint_value = True
res = sess.get('sysUpTimeInstance')
assert res.oid == 'sysUpTimeInstance'
assert res.oid_index == ''
assert re.match(r'^\d+:\d+:\d+:\d+\.\d+$', res.value)
assert res.snmp_type == 'TICKS'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_get_use_enums(sess):
sess.use_enums = True
res = sess.get('ifAdminStatus.1')
assert res.oid == 'ifAdminStatus'
assert res.oid_index == '1'
assert res.value == 'up'
assert res.snmp_type == 'INTEGER'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_get_next(sess):
res = sess.get_next([
('sysUpTime', '0'),
('sysContact', '0'),
('sysLocation', '0')
])
assert len(res) == 3
assert res[0].oid == 'sysContact'
assert res[0].oid_index == '0'
assert res[0].value == 'G. S. Marzot <[email protected]>'
assert res[0].snmp_type == 'OCTETSTR'
assert res[1].oid == 'sysName'
assert res[1].oid_index == '0'
assert res[1].value == platform.node()
assert res[1].snmp_type == 'OCTETSTR'
assert res[2].oid == 'sysORLastChange'
assert res[2].oid_index == '0'
assert int(res[2].value) >= 0
assert res[2].snmp_type == 'TICKS'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_set(sess):
res = sess.get(('sysLocation', '0'))
assert res.value != 'my newer location'
success = sess.set(('sysLocation', '0'), 'my newer location')
assert success
res = sess.get(('sysLocation', '0'))
assert res.value == 'my newer location'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_set_multiple(sess):
res = sess.get(['sysLocation.0', 'nsCacheTimeout.1.3.6.1.2.1.2.2'])
assert res[0].value != 'my newer location'
assert res[1].value != '160'
success = sess.set_multiple([
('sysLocation.0', 'my newer location'),
(('nsCacheTimeout', '.1.3.6.1.2.1.2.2'), 160),
])
assert success
res = sess.get(['sysLocation.0', 'nsCacheTimeout.1.3.6.1.2.1.2.2'])
assert res[0].value == 'my newer location'
assert res[1].value == '160'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_get_bulk(sess): # noqa
if sess.version == 1:
with pytest.raises(EasySNMPError):
sess.get_bulk(
['sysUpTime', 'sysORLastChange', 'sysORID', 'sysORDescr',
'sysORUpTime'],
2, 8
)
else:
res = sess.get_bulk(
['sysUpTime', 'sysORLastChange', 'sysORID', 'sysORDescr',
'sysORUpTime'], 2, 8
)
assert len(res) == 26
assert res[0].oid == 'sysUpTimeInstance'
assert res[0].oid_index == ''
assert int(res[0].value) > 0
assert res[0].snmp_type == 'TICKS'
assert res[4].oid == 'sysORUpTime'
assert res[4].oid_index == '1'
assert int(res[4].value) >= 0
assert res[4].snmp_type == 'TICKS'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_get_invalid_instance(sess):
    # Sadly, SNMP v1 doesn't distinguish between an invalid instance and an
    # invalid object ID; instead it raises a noSuchName error
if sess.version == 1:
with pytest.raises(EasySNMPNoSuchNameError):
sess.get('sysDescr.100')
else:
res = sess.get('sysDescr.100')
assert res.snmp_type == 'NOSUCHINSTANCE'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_get_invalid_instance_with_abort_enabled(sess):
    # Sadly, SNMP v1 doesn't distinguish between an invalid instance and an
    # invalid object ID; instead it raises a noSuchName error
sess.abort_on_nonexistent = True
if sess.version == 1:
with pytest.raises(EasySNMPNoSuchNameError):
sess.get('sysDescr.100')
else:
with pytest.raises(EasySNMPNoSuchInstanceError):
sess.get('sysDescr.100')
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_get_invalid_object(sess):
if sess.version == 1:
with pytest.raises(EasySNMPNoSuchNameError):
sess.get('iso')
else:
res = sess.get('iso')
assert res.snmp_type == 'NOSUCHOBJECT'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_get_invalid_object_with_abort_enabled(sess):
sess.abort_on_nonexistent = True
if sess.version == 1:
with pytest.raises(EasySNMPNoSuchNameError):
sess.get('iso')
else:
with pytest.raises(EasySNMPNoSuchObjectError):
sess.get('iso')
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_walk(sess):
res = sess.walk('system')
assert len(res) >= 7
assert res[0].oid == 'sysDescr'
assert res[0].oid_index == '0'
assert platform.version() in res[0].value
assert res[0].snmp_type == 'OCTETSTR'
assert res[3].oid == 'sysContact'
assert res[3].oid_index == '0'
assert res[3].value == 'G. S. Marzot <[email protected]>'
assert res[3].snmp_type == 'OCTETSTR'
assert res[4].oid == 'sysName'
assert res[4].oid_index == '0'
assert res[4].value == platform.node()
assert res[4].snmp_type == 'OCTETSTR'
assert res[5].oid == 'sysLocation'
assert res[5].oid_index == '0'
assert res[5].value == 'my original location'
assert res[5].snmp_type == 'OCTETSTR'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_bulkwalk(sess):
if sess.version == 1:
with pytest.raises(EasySNMPError):
sess.bulkwalk('system')
else:
        res = sess.bulkwalk('system')
assert len(res) >= 7
assert res[0].oid == 'sysDescr'
assert res[0].oid_index == '0'
assert platform.version() in res[0].value
assert res[0].snmp_type == 'OCTETSTR'
assert res[3].oid == 'sysContact'
assert res[3].oid_index == '0'
assert res[3].value == 'G. S. Marzot <[email protected]>'
assert res[3].snmp_type == 'OCTETSTR'
assert res[4].oid == 'sysName'
assert res[4].oid_index == '0'
assert res[4].value == platform.node()
assert res[4].snmp_type == 'OCTETSTR'
assert res[5].oid == 'sysLocation'
assert res[5].oid_index == '0'
assert res[5].value == 'my original location'
assert res[5].snmp_type == 'OCTETSTR'
@pytest.mark.parametrize('sess', [sess_v1(), sess_v2(), sess_v3()])
def test_session_walk_all(sess):
# TODO: Determine why walking iso doesn't work for SNMP v1
if sess.version == 1:
with pytest.raises(EasySNMPNoSuchNameError):
sess.walk('.')
else:
res = sess.walk('.')
assert len(res) > 0
assert res[0].oid == 'sysDescr'
assert res[0].oid_index == '0'
assert platform.version() in res[0].value
assert res[0].snmp_type == 'OCTETSTR'
assert res[3].oid == 'sysContact'
assert res[3].oid_index == '0'
assert res[3].value == 'G. S. Marzot <[email protected]>'
assert res[3].snmp_type == 'OCTETSTR'
assert res[4].oid == 'sysName'
assert res[4].oid_index == '0'
assert res[4].value == platform.node()
assert res[4].snmp_type == 'OCTETSTR'
assert res[5].oid == 'sysLocation'
assert res[5].oid_index == '0'
assert res[5].value == 'my original location'
assert res[5].snmp_type == 'OCTETSTR'
|
|
import os
import hashlib
import inspect
import re
import zlib
from lib import BaseTest
def strip_processor(output):
return "\n".join([l for l in output.split("\n") if not l.startswith(' ') and not l.startswith('Date:')])
def ungzip_if_required(output):
    # "\x1f\x8b" is the gzip magic number; wbits of 16 + MAX_WBITS tells zlib
    # to expect a gzip wrapper around the deflate stream
    if output.startswith("\x1f\x8b"):
        return zlib.decompress(output, 16 + zlib.MAX_WBITS)
    return output
class PublishRepo1Test(BaseTest):
"""
publish repo: default
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick local-repo"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo1Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/Contents-i386.gz')
self.check_exists('public/dists/maverick/main/source/Sources')
self.check_exists('public/dists/maverick/main/source/Sources.gz')
self.check_exists('public/dists/maverick/main/source/Sources.bz2')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1-1.3.dsc')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1.orig.tar.gz')
self.check_exists('public/pool/main/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
self.check_exists('public/pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')
        # verify contents except for sums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
self.check_file_contents('public/dists/maverick/main/source/Sources', 'sources', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
self.check_file_contents('public/dists/maverick/main/binary-i386/Packages', 'binary', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
self.check_file_contents('public/dists/maverick/main/Contents-i386.gz', 'contents_i386', match_prepare=ungzip_if_required)
self.check_file_contents('public/dists/maverick/Contents-i386.gz', 'contents_i386_legacy', match_prepare=ungzip_if_required)
# verify signatures
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
# verify sums
release = self.read_file('public/dists/maverick/Release').split("\n")
release = [l for l in release if l.startswith(" ")]
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
pathsSeen.add(path)
fileSize = int(fileSize)
st = os.stat(os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/', path))
if fileSize != st.st_size:
raise Exception("file size doesn't match for %s: %d != %d" % (path, fileSize, st.st_size))
if len(fileHash) == 32:
h = hashlib.md5()
elif len(fileHash) == 40:
h = hashlib.sha1()
elif len(fileHash) == 64:
h = hashlib.sha256()
else:
h = hashlib.sha512()
h.update(self.read_file(os.path.join('public/dists/maverick', path)))
if h.hexdigest() != fileHash:
raise Exception("file hash doesn't match for %s: %s != %s" % (path, fileHash, h.hexdigest()))
if pathsSeen != set(['main/binary-i386/Packages', 'main/binary-i386/Packages.bz2', 'main/binary-i386/Packages.gz',
'main/source/Sources', 'main/source/Sources.gz', 'main/source/Sources.bz2',
'main/binary-i386/Release', 'main/source/Release', 'main/Contents-i386.gz',
'Contents-i386.gz']):
raise Exception("path seen wrong: %r" % (pathsSeen, ))
class PublishRepo2Test(BaseTest):
"""
publish repo: different component
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick -component=contrib local-repo"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo2Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/contrib/Contents-i386.gz')
self.check_exists('public/dists/maverick/contrib/source/Sources')
self.check_exists('public/dists/maverick/contrib/source/Sources.gz')
self.check_exists('public/dists/maverick/contrib/source/Sources.bz2')
self.check_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1-1.3.dsc')
self.check_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1.orig.tar.gz')
self.check_exists('public/pool/contrib/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
self.check_exists('public/pool/contrib/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')
class PublishRepo3Test(BaseTest):
"""
publish repo: different architectures
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly -architectures=i386 publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick -component=contrib local-repo"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo3Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/contrib/Contents-i386.gz')
self.check_not_exists('public/dists/maverick/contrib/source/Sources')
self.check_not_exists('public/dists/maverick/contrib/source/Sources.gz')
self.check_not_exists('public/dists/maverick/contrib/source/Sources.bz2')
self.check_not_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1-1.3.dsc')
self.check_not_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
self.check_not_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1.orig.tar.gz')
self.check_not_exists('public/pool/contrib/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
self.check_exists('public/pool/contrib/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')
class PublishRepo4Test(BaseTest):
"""
publish repo: under prefix
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick local-repo ppa"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo4Test, self).check()
self.check_exists('public/ppa/dists/maverick/InRelease')
self.check_exists('public/ppa/dists/maverick/Release')
self.check_exists('public/ppa/dists/maverick/Release.gpg')
self.check_exists('public/ppa/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/ppa/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/ppa/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/ppa/dists/maverick/main/Contents-i386.gz')
self.check_exists('public/ppa/dists/maverick/main/source/Sources')
self.check_exists('public/ppa/dists/maverick/main/source/Sources.gz')
self.check_exists('public/ppa/dists/maverick/main/source/Sources.bz2')
self.check_exists('public/ppa/pool/main/p/pyspi/pyspi_0.6.1-1.3.dsc')
self.check_exists('public/ppa/pool/main/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('public/ppa/pool/main/p/pyspi/pyspi_0.6.1.orig.tar.gz')
self.check_exists('public/ppa/pool/main/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
self.check_exists('public/ppa/pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')
class PublishRepo5Test(BaseTest):
"""
    publish repo: distribution not specified
"""
fixtureDB = True
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo local-repo"
expectedCode = 1
class PublishRepo6Test(BaseTest):
"""
publish repo: double publish under root
"""
fixtureDB = True
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
"aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick local-repo",
]
runCmd = "aptly publish repo -distribution=maverick local-repo"
expectedCode = 1
class PublishRepo7Test(BaseTest):
"""
publish repo: double publish under prefix
"""
fixtureDB = True
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
"aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick local-repo ./ppa",
]
runCmd = "aptly publish repo -distribution=maverick local-repo ppa"
expectedCode = 1
class PublishRepo8Test(BaseTest):
"""
publish repo: wrong prefix
"""
fixtureDB = True
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -distribution=maverick local-repo ppa/dists/la"
expectedCode = 1
class PublishRepo9Test(BaseTest):
"""
publish repo: wrong prefix
"""
fixtureDB = True
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -distribution=maverick local-repo ppa/pool/la"
expectedCode = 1
class PublishRepo10Test(BaseTest):
"""
publish repo: wrong prefix
"""
fixtureDB = True
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -distribution=maverick local-repo ../la"
expectedCode = 1
class PublishRepo11Test(BaseTest):
"""
    publish repo: no local repo
"""
runCmd = "aptly publish repo local-repo"
expectedCode = 1
class PublishRepo12Test(BaseTest):
"""
publish repo: -skip-signing
"""
fixtureDB = True
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -skip-signing -distribution=maverick local-repo"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo12Test, self).check()
self.check_not_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_not_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/Contents-i386.gz')
self.check_exists('public/dists/maverick/main/source/Sources')
self.check_exists('public/dists/maverick/main/source/Sources.gz')
self.check_exists('public/dists/maverick/main/source/Sources.bz2')
        # verify contents except for sums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
class PublishRepo13Test(BaseTest):
"""
publish repo: empty repo is not publishable w/o architectures list
"""
fixtureDB = True
fixtureCmds = [
"aptly repo create local-repo",
]
runCmd = "aptly publish repo --distribution=mars --skip-signing local-repo"
expectedCode = 1
class PublishRepo14Test(BaseTest):
"""
publish repo: publishing defaults from local repo
"""
fixtureCmds = [
"aptly repo create -distribution=maverick -component=contrib local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec local-repo"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo14Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/contrib/Contents-i386.gz')
self.check_exists('public/dists/maverick/contrib/source/Sources')
self.check_exists('public/dists/maverick/contrib/source/Sources.gz')
self.check_exists('public/dists/maverick/contrib/source/Sources.bz2')
self.check_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1-1.3.dsc')
self.check_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1.orig.tar.gz')
self.check_exists('public/pool/contrib/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
self.check_exists('public/pool/contrib/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')
class PublishRepo15Test(BaseTest):
"""
publish repo: custom label
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick -component=contrib -label=label15 local-repo"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo15Test, self).check()
        # verify contents except for sums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
class PublishRepo16Test(BaseTest):
"""
publish repo: empty repo is publishable with architectures list
"""
fixtureDB = True
fixtureCmds = [
"aptly repo create local-repo",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -architectures=source,i386 --distribution=maverick local-repo"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo16Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/source/Sources')
self.check_exists('public/dists/maverick/main/source/Sources.gz')
self.check_exists('public/dists/maverick/main/source/Sources.bz2')
class PublishRepo17Test(BaseTest):
"""
publish repo: multiple component
"""
fixtureCmds = [
"aptly repo create repo1",
"aptly repo create repo2",
"aptly repo add repo1 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb ${files}/pyspi_0.6.1-1.3.dsc",
"aptly repo add repo2 ${files}/pyspi-0.6.1-1.3.stripped.dsc",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -component=main,contrib -distribution=maverick repo1 repo2"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo17Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/source/Sources')
self.check_exists('public/dists/maverick/main/source/Sources.gz')
self.check_exists('public/dists/maverick/main/source/Sources.bz2')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/contrib/source/Sources')
self.check_exists('public/dists/maverick/contrib/source/Sources.gz')
self.check_exists('public/dists/maverick/contrib/source/Sources.bz2')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1-1.3.dsc')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1.orig.tar.gz')
self.check_exists('public/pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1.orig.tar.gz')
self.check_exists('public/pool/contrib/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
        # verify contents except for checksums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
# verify signatures
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
# verify sums
release = self.read_file('public/dists/maverick/Release').split("\n")
release = [l for l in release if l.startswith(" ")]
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
pathsSeen.add(path)
fileSize = int(fileSize)
st = os.stat(os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/', path))
if fileSize != st.st_size:
raise Exception("file size doesn't match for %s: %d != %d" % (path, fileSize, st.st_size))
if len(fileHash) == 32:
h = hashlib.md5()
elif len(fileHash) == 40:
h = hashlib.sha1()
elif len(fileHash) == 64:
h = hashlib.sha256()
else:
h = hashlib.sha512()
h.update(self.read_file(os.path.join('public/dists/maverick', path)))
if h.hexdigest() != fileHash:
raise Exception("file hash doesn't match for %s: %s != %s" % (path, fileHash, h.hexdigest()))
if pathsSeen != set(['main/binary-i386/Packages', 'main/binary-i386/Packages.gz',
'main/binary-i386/Packages.bz2',
'main/source/Sources', 'main/source/Sources.gz', 'main/source/Sources.bz2',
'contrib/binary-i386/Packages', 'contrib/binary-i386/Packages.gz',
'contrib/binary-i386/Packages.bz2',
'contrib/source/Sources', 'contrib/source/Sources.gz', 'contrib/source/Sources.bz2',
'main/source/Release', 'contrib/source/Release',
'main/binary-i386/Release', 'contrib/binary-i386/Release',
'main/Contents-i386.gz', 'Contents-i386.gz']):
raise Exception("path seen wrong: %r" % (pathsSeen, ))
class PublishRepo18Test(BaseTest):
"""
    publish repo: multiple components, guessing component names
"""
fixtureCmds = [
"aptly repo create -distribution=squeeze -component=main repo1",
"aptly repo create -distribution=squeeze -component=contrib repo2",
"aptly repo add repo1 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb ${files}/pyspi_0.6.1-1.3.dsc",
"aptly repo add repo2 ${files}/pyspi-0.6.1-1.3.stripped.dsc",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -component=, repo1 repo2"
gold_processor = BaseTest.expand_environ
class PublishRepo19Test(BaseTest):
"""
publish repo: duplicate component name (guessed)
"""
fixtureCmds = [
"aptly repo create -distribution=squeeze -component=contrib repo1",
"aptly repo create -distribution=squeeze -component=contrib repo2",
"aptly repo add repo1 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb ${files}/pyspi_0.6.1-1.3.dsc",
"aptly repo add repo2 ${files}/pyspi-0.6.1-1.3.stripped.dsc",
]
runCmd = "aptly publish repo -component=, repo1 repo2"
expectedCode = 1
class PublishRepo20Test(BaseTest):
"""
publish repo: duplicate component name (manual)
"""
fixtureCmds = [
"aptly repo create -distribution=squeeze -component=main repo1",
"aptly repo create -distribution=squeeze -component=contrib repo2",
]
runCmd = "aptly publish repo -component=b,b repo1 repo2"
expectedCode = 1
class PublishRepo21Test(BaseTest):
"""
publish repo: distribution conflict
"""
fixtureCmds = [
"aptly repo create -distribution=squeeze -component=main repo1",
"aptly repo create -distribution=wheezy -component=contrib repo2",
]
runCmd = "aptly publish repo -component=, repo1 repo2"
expectedCode = 1
class PublishRepo22Test(BaseTest):
"""
    publish repo: no such repo
"""
fixtureCmds = [
"aptly repo create -distribution=squeeze -component=main repo1",
]
runCmd = "aptly publish repo -component=, repo1 repo2"
expectedCode = 1
class PublishRepo23Test(BaseTest):
"""
publish repo: mismatch in count
"""
fixtureCmds = [
"aptly repo create -distribution=squeeze -component=main repo1",
]
runCmd = "aptly publish repo -component=main,contrib repo1"
expectedCode = 2
def outputMatchPrepare(_, s):
return "\n".join([l for l in s.split("\n") if l.startswith("ERROR")])
class PublishRepo24Test(BaseTest):
"""
publish repo: conflicting files in the repo
"""
fixtureCmds = [
"aptly repo create local-repo1",
"aptly repo add local-repo1 ${files}",
"aptly repo create local-repo2",
"aptly repo add local-repo2 ${testfiles}",
"aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick local-repo1",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=squeeze local-repo2"
expectedCode = 1
gold_processor = BaseTest.expand_environ
class PublishRepo25Test(BaseTest):
"""
publish repo: -force-overwrite
"""
fixtureCmds = [
"aptly repo create local-repo1",
"aptly repo add local-repo1 ${files}",
"aptly repo create local-repo2",
"aptly repo add local-repo2 ${testfiles}",
"aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick local-repo1",
]
runCmd = "aptly publish repo -force-overwrite -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=squeeze local-repo2"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo25Test, self).check()
self.check_file_contents("public/pool/main/p/pyspi/pyspi_0.6.1.orig.tar.gz", "file")
class PublishRepo26Test(BaseTest):
"""
publish repo: sign with passphrase
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -keyring=${files}/aptly_passphrase.pub -secret-keyring=${files}/aptly_passphrase.sec -passphrase=verysecret -distribution=maverick local-repo"
gold_processor = BaseTest.expand_environ
def outputMatchPrepare(_, s):
return s.replace("gpg: gpg-agent is not available in this session\n", "")
def check(self):
super(PublishRepo26Test, self).check()
# verify signatures
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly_passphrase.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly_passphrase.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
class PublishRepo27Test(BaseTest):
"""
publish repo: with udebs
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files} ${udebs}",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick local-repo"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo27Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Release')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/Contents-i386.gz')
self.check_exists('public/dists/maverick/main/debian-installer/binary-i386/Release')
self.check_exists('public/dists/maverick/main/debian-installer/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/debian-installer/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/debian-installer/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/Contents-udeb-i386.gz')
self.check_exists('public/dists/maverick/main/source/Release')
self.check_exists('public/dists/maverick/main/source/Sources')
self.check_exists('public/dists/maverick/main/source/Sources.gz')
self.check_exists('public/dists/maverick/main/source/Sources.bz2')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1-1.3.dsc')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1.orig.tar.gz')
self.check_exists('public/pool/main/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
self.check_exists('public/pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('public/pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb')
self.check_exists('public/pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_i386.udeb')
        # verify contents except for checksums
self.check_file_contents('public/dists/maverick/main/debian-installer/binary-i386/Packages', 'udeb_binary', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
class PublishRepo28Test(BaseTest):
"""
publish repo: -skip-contents
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files} ${udebs}",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick -skip-contents local-repo"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishRepo28Test, self).check()
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/main/binary-i386/Release')
self.check_not_exists('public/dists/maverick/main/Contents-i386.gz')
self.check_exists('public/dists/maverick/main/debian-installer/binary-i386/Release')
self.check_not_exists('public/dists/maverick/main/Contents-udeb-i386.gz')
class PublishRepo29Test(BaseTest):
"""
publish repo: broken .deb file for contents
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${testfiles}",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick local-repo"
gold_processor = BaseTest.expand_environ
class PublishRepo30Test(BaseTest):
"""
publish repo: default (internal PGP implementation)
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick local-repo"
gold_processor = BaseTest.expand_environ
configOverride = {"gpgProvider": "internal"}
def check(self):
super(PublishRepo30Test, self).check()
# verify signatures
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
class PublishRepo31Test(BaseTest):
"""
publish repo: sign with passphrase (internal PGP implementation)
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
]
runCmd = "aptly publish repo -keyring=${files}/aptly_passphrase.pub -secret-keyring=${files}/aptly_passphrase.sec -passphrase=verysecret -distribution=maverick local-repo"
gold_processor = BaseTest.expand_environ
configOverride = {"gpgProvider": "internal"}
def outputMatchPrepare(_, s):
return re.sub(r' \d{4}-\d{2}-\d{2}', '', s)
def check(self):
super(PublishRepo31Test, self).check()
# verify signatures
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly_passphrase.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly_passphrase.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
|
|
import sys, unittest, socket
import inspect
from raygun4py import raygunmsgs
class TestRaygunMessageBuilder(unittest.TestCase):
def setUp(self):
self.builder = raygunmsgs.RaygunMessageBuilder({}).new()
self.request = {
"headers": {
"referer": "localhost",
"user-Agent": "Mozilla"
},
"hostName": "localhost",
"url": "/resource",
"httpMethod": "GET",
"ipAddress": "127.0.0.1",
"queryString": None,
"form": None,
"rawData": None
}
# https://www.python.org/dev/peps/pep-3333/#environ-variables
self.raw_wsgi_request = {
"HTTP_PRAGMA": "no-cache",
"HTTP_COOKIE": "test-cookie=foo",
"SCRIPT_NAME": "",
"REQUEST_METHOD": "GET",
"HTTP_HOST": "localhost:1234",
"PATH_INFO": "/resource-wsgi",
"SERVER_PROTOCOL": "HTTP/1.1",
"QUERY_STRING": "query=testme",
"HTTP_UPGRADE_INSECURE_REQUESTS": "1",
"HTTP_CACHE_CONTROL": "no-cache",
"HTTP_ACCEPT": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"HTTP_USER_AGENT": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
"HTTP_REFERER": "https://www.google.com/",
"HTTP_CONNECTION": "keep-alive",
"SERVER_NAME": "localhost",
"REMOTE_ADDR": "127.0.0.1",
"HTTP_ACCEPT_LANGUAGE": "en-US,en;q=0.9",
"wsgi.url_scheme": "http",
"SERVER_PORT": "1234",
"HTTP_ACCEPT_ENCODING": "gzip, deflate, br"
}
def test_machinename(self):
self.builder.set_machine_name(socket.gethostname())
self.assertIsNotNone(self.builder.raygunMessage.details['machineName'])
def test_customdata(self):
self.builder.set_customdata({1: "one"})
self.assertIsInstance(self.builder.raygunMessage.details['userCustomData'], dict)
def test_tags(self):
self.builder.set_tags([1, 2, 3])
self.assertIsInstance(self.builder.raygunMessage.details['tags'], list)
def test_request_ip(self):
self.builder.set_request_details(self.request)
self.assertEqual(self.builder.raygunMessage.details['request']['iPAddress'], '127.0.0.1')
def test_request_ip_from_remote_addr(self):
self.builder.set_request_details(self.raw_wsgi_request)
self.assertEqual(self.builder.raygunMessage.details['request']['iPAddress'], '127.0.0.1')
def test_user_fname(self):
self.builder.set_user({
'firstName': 'Foo',
})
self.assertEqual(self.builder.raygunMessage.details['user']['firstName'], 'Foo')
def test_user_fullname(self):
self.builder.set_user({
'fullName': 'Foo Bar',
})
self.assertEqual(self.builder.raygunMessage.details['user']['fullName'], 'Foo Bar')
def test_user_email(self):
self.builder.set_user({
'email': '[email protected]',
})
self.assertEqual(self.builder.raygunMessage.details['user']['email'], '[email protected]')
def test_user_identifier(self):
self.builder.set_user({
'identifier': '[email protected]',
})
self.assertEqual(self.builder.raygunMessage.details['user']['identifier'], '[email protected]')
def test_user_anon(self):
self.builder.set_user({
'isAnonymous': False
})
self.assertEqual(self.builder.raygunMessage.details['user']['isAnonymous'], False)
def test_wsgi_fallbacks(self):
self.builder.set_request_details(self.raw_wsgi_request)
self.assertEqual(self.builder.raygunMessage.details['request']['hostName'], 'localhost:1234')
self.assertEqual(self.builder.raygunMessage.details['request']['url'], '/resource-wsgi')
self.assertEqual(self.builder.raygunMessage.details['request']['httpMethod'], 'GET')
self.assertEqual(self.builder.raygunMessage.details['request']['queryString'], 'query=testme')
def test_wsgi_standard_header_names(self):
self.builder.set_request_details(self.raw_wsgi_request)
self.assertEqual(self.builder.raygunMessage.details['request']['headers']['User-Agent'],
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36")
self.assertEqual(self.builder.raygunMessage.details['request']['headers']['Referer'],
"https://www.google.com/")
def test_set_request_details_allows_chaining(self):
self.builder \
.set_request_details(self.raw_wsgi_request) \
.set_tags(['foo', 'bar'])
def test_environment_variables(self):
self.builder.set_environment_details(None)
self.assertIsNotNone(self.builder.raygunMessage.details['environment']['environmentVariables'])
def test_environment_variables_are_ignored(self):
self.builder.options = {
'transmit_environment_variables': False
}
self.builder.set_environment_details(None)
self.assertIsNone(self.builder.raygunMessage.details['environment']['environmentVariables'])
class TestRaygunErrorMessage(unittest.TestCase):
class GrandchildError(Exception):
pass
class ChildError(Exception):
pass
class ParentError(Exception):
pass
def setUp(self):
try:
self.parent()
except Exception as e:
self.theException = e
exc_info = sys.exc_info()
self.msg = raygunmsgs.RaygunErrorMessage(exc_info[0], exc_info[1], exc_info[2], { 'transmitLocalVariables': True })
def parent(self):
try:
self.child()
except TestRaygunErrorMessage.ChildError as exc:
raise TestRaygunErrorMessage.ParentError("Parent message") from exc
def child(self):
try:
raise TestRaygunErrorMessage.GrandchildError("Grandchild message")
except Exception as ex:
raise TestRaygunErrorMessage.ChildError("Child message")
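    # parent() raises ParentError "from" ChildError (explicit chaining, sets __cause__);
    # child() raises ChildError inside an except block without "from" (implicit chaining, sets __context__).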
def test_exc_traceback_none_generates_empty_array(self):
error_message = raygunmsgs.RaygunErrorMessage(Exception, None, None, {})
self.assertEqual(error_message.stackTrace, [])
def test_classname(self):
self.assertEqual(self.msg.className, 'ParentError')
    def test_chained_exception_message_parent_has_nested_child(self):
        self.assertEqual(self.msg.innerError.className, 'ChildError')
    def test_chained_exception_message_child_has_nested_grandchild(self):
        self.assertEqual(self.msg.innerError.innerError.className, 'GrandchildError')
    def test_chained_exception_nested_child_message(self):
        self.assertEqual(self.msg.innerError.message, 'ChildError: Child message')
    def test_chained_exception_nested_grandchild_message(self):
        self.assertEqual(self.msg.innerError.innerError.message, 'GrandchildError: Grandchild message')
def test_chained_exception_last_exception_caught_is_parent(self):
self.assertIsInstance(self.theException.__context__, TestRaygunErrorMessage.ChildError)
def test_chained_exception_cause_is_child(self):
self.assertIsInstance(self.theException.__cause__, TestRaygunErrorMessage.ChildError)
def test_chained_exception_childs_cause_is_grandchild(self):
self.assertIsInstance(self.theException.__cause__.__context__, TestRaygunErrorMessage.GrandchildError)
def test_methodname_none(self):
original_getinnerframes = inspect.getinnerframes
inspect.getinnerframes = getinnerframes_mock_methodname_none
error_message = raygunmsgs.RaygunErrorMessage(int, 1, None, { "transmitLocalVariables": False })
self.assertEqual(error_message.__dict__['stackTrace'][0]['methodName'], None)
inspect.getinnerframes = original_getinnerframes
def getinnerframes_mock_methodname_none(exception):
return [(
'localVar',
'fileName',
'lineNumber',
'className',
None
)]
def main():
unittest.main()
if __name__ == '__main__':
main()
|
|
# python3
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model class definitions."""
import abc
import collections.abc
import datetime as dt
import json
import os
import pathlib
import shutil
import subprocess
import time
from googleapiclient import discovery
from googleapiclient import errors
import jinja2 as jinja
import tensorflow.compat.v1 as tf
from tensorflow.python.tools import saved_model_utils
from ml_pipeline_gen.parsers import parse_yaml
class BaseModel(abc.ABC):
"""Abstract class representing an ML model."""
def __init__(self, config_path, framework):
config = parse_yaml(config_path)
self._set_config(config)
self.ml_client = discovery.build("ml", "v1")
self.framework = framework
def _get_default_config(self):
return {
"model": {
"metrics": [],
},
}
def _deep_update(self, d, u):
"""Updates a dict and any nested dicts within it."""
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = self._deep_update(d.get(k, {}), v)
else:
d[k] = v
return d
def _set_config(self, new_config):
"""Iterates through the config dict and sets instance variables."""
config = self._get_default_config()
config = self._deep_update(config, new_config)
for key in config:
setattr(self, key, config[key])
self._set_model_params(config)
# TODO(humichael): Validate config (required, metrics is one of...)
def _get_default_input_args(self, train_path, eval_path):
return {
"train_path": {
"type": "str",
"help": "Dir or bucket containing training data.",
"default": train_path,
},
"eval_path": {
"type": "str",
"help": "Dir or bucket containing eval data.",
"default": eval_path,
},
"model_dir": {
"type": "str",
"help": "Dir or bucket to save model files.",
"default": "models",
},
}
def _set_model_params(self, config):
"""Sets the input args and updates self.model_dir."""
model_params = (config["model_params"]
if "model_params" in config else {})
input_args = self._get_default_input_args(
train_path=config["data"]["train"],
eval_path=config["data"]["evaluation"],
)
if "input_args" in model_params:
new_input_args = model_params["input_args"]
input_args = self._deep_update(input_args, new_input_args)
self.model_params = model_params
self.model_params["input_args"] = input_args
self.model_dir = input_args["model_dir"]["default"]
# TODO(humichael): Move to utils
def get_parent(self, model=False, version="", job="",
operation=""):
"""Returns the parent to pass to the CAIP API.
Args:
model: true if the parent entity is a model.
version: a version name.
job: a job id.
operation: an operation name.
Returns:
a parent entity to pass to a CAIP API call. With no additional
parameters, a project is returned. However, setting any one of the
            keyword args will change the returned entity based on the set
parameter.
"""
parent = "projects/{}".format(self.project_id)
if version:
parent += "/models/{}/versions/{}".format(
self.model["name"], version)
elif model:
parent += "/models/{}".format(self.model["name"])
elif job:
parent += "/jobs/{}".format(job)
elif operation:
parent += "/operations/{}".format(operation)
return parent
# TODO(humichael): Move to utils
def _call_ml_client(self, request, silent_fail=False):
"""Calls the CAIP API by executing the given request.
Args:
request: an API request built using self.ml_client.
silent_fail: does not raise errors if True.
Returns:
response: a dict representing either the response of a successful
call or the error message of an unsuccessful call.
success: True if the API call was successful.
Raises:
HttpError: when API call fails and silent_fail is set to False.
"""
try:
response = request.execute()
success = True
except errors.HttpError as err:
if not silent_fail:
raise err
# pylint: disable=protected-access
response = {"error": err._get_reason()}
success = False
return response, success
def _write_template(self, env, template_path, args, dest):
template = env.get_template(template_path)
body = template.render(**args)
with open(dest, "w+") as f:
f.write(body)
def _write_static(self):
"""Copies static files to the working directory."""
root_dir = pathlib.Path(__file__).parent.resolve()
for d in root_dir.joinpath("static").iterdir():
if d.is_dir() and not os.path.exists(d.stem):
shutil.copytree(d, d.stem)
# TODO(humichael): find way to avoid using relative paths.
@abc.abstractmethod
def generate_files(self, task_template_path,
model_template_path, inputs_template_path):
"""Use Jinja templates to generate model training code.
Args:
task_template_path: path to task.py template.
model_template_path: path to model.py template.
inputs_template_path: path to inputs.py template.
"""
loader = jinja.PackageLoader("ml_pipeline_gen", "templates")
        env = jinja.Environment(loader=loader, trim_blocks=True,
                                lstrip_blocks=True)
task_args = {
"input_args": self.model_params["input_args"],
}
model_args = {
"model_path": self.model["path"],
"metrics": self.model["metrics"],
}
inputs_args = {
"schema": json.dumps(self.data["schema"], indent=4),
"target": self.model["target"],
}
setup_args = {"package_name": self.package_name}
self._write_static()
self._write_template(env, task_template_path, task_args,
"trainer/task.py")
self._write_template(env, model_template_path, model_args,
"trainer/model.py")
self._write_template(env, inputs_template_path, inputs_args,
"trainer/inputs.py")
self._write_template(env, "setup.py", setup_args, "setup.py")
def get_job_dir(self):
"""Returns the GCS path to the job dir."""
return os.path.join("gs://", self.bucket_id, self.model["name"])
def get_model_dir(self):
"""Returns the GCS path to the model dir."""
return os.path.join(self.get_job_dir(), self.model_dir)
def _get_best_trial(self, job_id):
"""Returns the best trial id for a training job.
Args:
job_id: a CAIP job id.
Returns:
the trial number that performed the best.
"""
name = self.get_parent(job=job_id)
        response = self.ml_client.projects().jobs().get(name=name).execute()
        best_trial = "1"
        if "trials" in response["trainingOutput"]:
            best_trial = response["trainingOutput"]["trials"][0]["trialId"]
return best_trial
def _get_deployment_dir(self, job_id):
"""Returns the GCS path to the Sklearn exported model.
Args:
job_id: a CAIP job id.
"""
best_trial = self._get_best_trial(job_id)
output_path = os.path.join(self.get_model_dir(), best_trial)
return output_path
def _get_staging_dir(self):
"""Returns the GCS path to the staging dir."""
return os.path.join(
"gs://", self.bucket_id, self.model["name"], "staging")
# TODO(humichael): Move to utils.py
def upload_trainer_dist(self):
"""Builds a source distribution and uploads it to GCS."""
dist_dir = "dist"
dist_file = "{}-1.0.tar.gz".format(self.package_name)
staging_dir = self._get_staging_dir()
subprocess.call(["python", "setup.py", "sdist"],
stdout=open(os.devnull, "wb"))
if not tf.io.gfile.exists(staging_dir):
tf.io.gfile.makedirs(staging_dir)
src = os.path.join(dist_dir, dist_file)
dst = os.path.join(staging_dir, dist_file)
tf.io.gfile.copy(src, dst, overwrite=True)
return dst
def _upload_metadata(self, path):
"""Uploads the metadata file necessary to run CAIP explanations.
Args:
path: GCS path to the model's *.pb directory
"""
inputs, outputs = {}, {}
meta_graph = saved_model_utils.get_meta_graph_def(path, "serve")
signature_def_key = "serving_default"
inputs_tensor_info = meta_graph.signature_def[
signature_def_key].inputs
outputs_tensor_info = meta_graph.signature_def[
signature_def_key].outputs
for feat, input_tensor in sorted(inputs_tensor_info.items()):
inputs[feat] = {"input_tensor_name": input_tensor.name}
for label, output_tensor in sorted(outputs_tensor_info.items()):
outputs[label] = {"output_tensor_name": output_tensor.name}
explanation_metadata = {
"inputs": inputs,
"outputs": outputs,
"framework": "tensorflow"
}
file_name = "explanation_metadata.json"
with open(file_name, "w+") as output_file:
json.dump(explanation_metadata, output_file)
dst = os.path.join(path, file_name)
tf.io.gfile.copy(file_name, dst, overwrite=True)
def _wait_until_done(self, job_id, wait_interval=60):
"""Blocks until the given job is completed.
Args:
job_id: a CAIP job id.
wait_interval: the amount of seconds to wait after checking the job
state.
Raises:
RuntimeError: if the job does not succeed.
"""
state = ""
end_states = ["SUCCEEDED", "FAILED", "CANCELLED"]
jobs_client = self.ml_client.projects().jobs()
print(
"Waiting for {} to complete. Checking every {} seconds.".format(
job_id, wait_interval))
while state not in end_states:
time.sleep(wait_interval)
request = jobs_client.get(name=self.get_parent(job=job_id))
response, _ = self._call_ml_client(request)
state = response["state"]
print("Job state of {}: {}".format(job_id, state))
if state != "SUCCEEDED":
raise RuntimeError(
"Job didn't succeed. End state: {}".format(state))
def train(self, tune=False, blocking=True, wait_interval=60):
"""Trains on CAIP.
Args:
tune: train with hyperparameter tuning if true.
blocking: true if the function should exit only once the job
completes.
wait_interval: if blocking, how often the job state should be
checked.
Returns:
job_id: a CAIP job id.
"""
now = dt.datetime.now().strftime("%Y%m%d_%H%M%S")
job_id = "train_{}_{}".format(self.model["name"], now)
package_uri = self.upload_trainer_dist()
jobs_client = self.ml_client.projects().jobs()
body = {
"jobId": job_id,
"trainingInput": {
"scaleTier": self.scale_tier,
"packageUris": [package_uri],
"pythonModule": "trainer.task",
"args": [
"--model_dir", self.get_model_dir(),
],
"jobDir": self.get_job_dir(),
"region": self.region,
"runtimeVersion": self.runtime_version,
"pythonVersion": self.python_version,
},
}
# TODO(smhosein): should we handle custom scale tiers?
if tune:
hp_config = parse_yaml(self.model_params["hyperparam_config"])
hyperparams = hp_config["trainingInput"]["hyperparameters"]
body["trainingInput"]["hyperparameters"] = hyperparams
request = jobs_client.create(parent=self.get_parent(),
body=body)
self._call_ml_client(request)
if blocking:
self._wait_until_done(job_id, wait_interval)
return job_id
def train_local(self):
"""Trains the model locally."""
subprocess.call("bin/run.local_train.sh")
def _create_model(self):
"""Creates a model for serving on CAIP."""
models_client = self.ml_client.projects().models()
body = {
"name": self.model["name"],
"regions": [self.region],
"onlinePredictionLogging": True,
}
request = models_client.create(
parent=self.get_parent(), body=body)
self._call_ml_client(request)
def _wait_until_op_done(self, op_name, wait_interval=30):
"""Blocks until the given Operation is completed.
Args:
op_name: a CAIP Operation name.
wait_interval: the amount of seconds to wait after checking the
state.
"""
done = False
op_client = self.ml_client.projects().operations()
print(
"Waiting for {} to complete. Checking every {} seconds.".format(
op_name, wait_interval))
while not done:
time.sleep(wait_interval)
request = op_client.get(
name=self.get_parent(operation=op_name))
response, _ = self._call_ml_client(request)
done = "done" in response and response["done"]
print("Operation {} completed: {}".format(op_name, done))
def _create_version(self, version, job_id, explanations,
wait_interval=30):
"""Creates a new version of the model for serving.
Args:
version: a version number to use to create a version name.
job_id: a CAIP job id.
explanations: whether to create a model that can perform
CAIP explanations.
wait_interval: if blocking, how often the job state should be
checked.
Returns:
the name of the version just created.
"""
versions_client = self.ml_client.projects().models().versions()
name = "{}_{}".format(self.model["name"], version)
body = {
"name": name,
"deploymentUri": self._get_deployment_dir(job_id),
"runtimeVersion": self.runtime_version,
"framework": self.get_deploy_framework(),
"pythonVersion": self.python_version,
"machineType": self.machine_type_pred,
}
if explanations:
exp = self.model_params["explain_output"]
exp_pm = exp["explain_param"]
body["explanationConfig"] = {
exp["explain_type"]: {exp_pm["name"]: exp_pm["value"]}
}
self._upload_metadata(self._get_deployment_dir(job_id))
request = versions_client.create(
parent=self.get_parent(model=True), body=body)
op, _ = self._call_ml_client(request)
op_name = op["name"].split("/")[-1]
self._wait_until_op_done(op_name, wait_interval)
return name
def get_versions(self):
"""Returns the model versions if a model exists.
Returns:
response: the API response if a model exists, otherwise an object
containing the error message.
model_exists: True if a deployed model exists.
"""
versions_client = self.ml_client.projects().models().versions()
request = versions_client.list(
parent=self.get_parent(model=True))
response, model_exists = self._call_ml_client(request,
silent_fail=True)
return response, model_exists
@abc.abstractmethod
def get_deploy_framework(self):
pass
def deploy(self, job_id, explanations=False):
"""Deploys model and returns the version name created.
Args:
job_id: a CAIP job id.
explanations: whether to create a model that can perform
CAIP explanations
Returns:
the name of the version just created.
"""
response, model_exists = self.get_versions()
if model_exists:
if response:
versions = [int(version["name"].split("_")[-1])
for version in response["versions"]]
version = max(versions) + 1
else:
version = 1
else:
self._create_model()
version = 0
return self._create_version(version, job_id, explanations)
# TODO(humichael): Add option to pass in csv/json file.
def online_predict(self, inputs, version=""):
"""Uses a deployed model to get predictions for the given inputs.
Args:
inputs: a list of feature vectors.
version: the version name of the deployed model to use. If none is
provided, the default version will be used.
Returns:
a list of predictions.
Raises:
RuntimeError: if the deployed model fails to make predictions.
"""
name = self.get_parent(model=True)
if version:
name = self.get_parent(version=version)
projects_client = self.ml_client.projects()
request = projects_client.predict(name=name,
body={"instances": inputs})
response, _ = self._call_ml_client(request)
if "predictions" in response:
return response["predictions"]
print(response)
raise RuntimeError("Prediction failed.")
def online_explanations(self, inputs, version=""):
"""Uses a deployed model to get explanations for the given inputs.
Args:
inputs: a list of feature vectors.
version: the version name of the deployed model to use. If none is
provided, the default version will be used.
Returns:
a list of explanations.
Raises:
RuntimeError: if the deployed model fails to make explanations.
"""
name = self.get_parent(model=True)
if version:
name = self.get_parent(version=version)
projects_client = self.ml_client.projects()
request = projects_client.explain(name=name,
body={"instances": inputs})
response, _ = self._call_ml_client(request)
if "explanations" in response:
return response["explanations"]
print(response)
raise RuntimeError("Explanations failed.")
# TODO(humichael): Move to utils.py
def upload_pred_input_data(self, src):
"""Uploads input data to GCS for prediction."""
inputs_dir = os.path.join(self.get_job_dir(), "inputs")
if not tf.io.gfile.exists(inputs_dir):
tf.io.gfile.makedirs(inputs_dir)
src_name = os.path.basename(src)
dst = os.path.join(inputs_dir, src_name)
tf.io.gfile.copy(src, dst, overwrite=True)
return dst
def get_pred_output_path(self):
"""Returns the path prediction outputs are written to."""
return os.path.join(self.get_job_dir(), "outputs")
def supports_batch_predict(self):
"""Returns True if CAIP supports batch prediction for this model."""
return True
def batch_predict(self, job_id="", version="", blocking=True,
wait_interval=60):
"""Uses a deployed model on GCS to create a prediction job.
Note: Batch prediction only supports Tensorflow models.
Args:
job_id: the job_id of a training job to use for batch prediction.
version: the version name of the deployed model to use. If none is
provided, the default version will be used.
blocking: true if the function should exit only once the job
completes.
wait_interval: if blocking, how often the job state should be
checked.
Returns:
job_id: a CAIP job id.
Raises:
RuntimeError: if batch prediction is not supported.
"""
if not self.supports_batch_predict():
raise RuntimeError("Batch predict not supported for this model.")
pred_info = self.data["prediction"]
inputs = pred_info["input_data_paths"]
if not isinstance(inputs, list):
inputs = [inputs]
input_format = (pred_info["input_format"] if "input_format" in pred_info
else "DATA_FORMAT_UNSPECIFIED")
output_format = (pred_info["output_format"]
if "output_format" in pred_info else "JSON")
now = dt.datetime.now().strftime("%Y%m%d_%H%M%S")
predict_id = "predict_{}_{}".format(self.model["name"], now)
jobs_client = self.ml_client.projects().jobs()
body = {
"jobId": predict_id,
"predictionInput": {
"dataFormat": input_format,
"outputDataFormat": output_format,
"inputPaths": inputs,
"maxWorkerCount": "10",
"region": self.region,
"batchSize": "64",
"outputPath": self.get_pred_output_path(),
},
}
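        # Prediction source: a training job's exported model, a specific
        # deployed version, or the model's default version.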
if job_id:
body["predictionInput"]["uri"] = self._get_deployment_dir(job_id)
body["predictionInput"]["runtimeVersion"] = self.runtime_version
elif version:
version = self.get_parent(version=version)
body["predictionInput"]["versionName"] = version
else:
model = self.get_parent(model=True)
body["predictionInput"]["modelName"] = model
request = jobs_client.create(parent=self.get_parent(),
body=body)
self._call_ml_client(request)
if blocking:
self._wait_until_done(predict_id, wait_interval)
return predict_id
# TODO(humichael): clean up with python code, not a shell script.
def clean_up(self):
"""Delete all generated files."""
subprocess.call("bin/cleanup.sh")
class SklearnModel(BaseModel):
"""SklearnModel class."""
def __init__(self, config):
super(SklearnModel, self).__init__(config, "sklearn")
def _get_default_input_args(self, train_path, eval_path):
args = super(SklearnModel, self)._get_default_input_args(
train_path, eval_path)
additional_args = {
"cross_validations": {
"type": "int",
"help": "Number of datasets to split to for cross validation.",
"default": 3,
},
}
args.update(additional_args)
return args
def generate_files(self):
super(SklearnModel, self).generate_files(
"sklearn_task.py", "sklearn_model.py", "sklearn_inputs.py")
def get_deploy_framework(self):
return "SCIKIT_LEARN"
def supports_batch_predict(self):
"""Returns True if CAIP supports batch prediction for this model."""
return False
class TFModel(BaseModel):
"""TFModel class."""
def __init__(self, config):
super(TFModel, self).__init__(config, "tensorflow")
def _get_default_input_args(self, train_path, eval_path):
args = super(TFModel, self)._get_default_input_args(
train_path, eval_path)
additional_args = {
"batch_size": {
"type": "int",
"help": "Number of rows of data fed to model each iteration.",
"default": 64,
},
"num_epochs": {
"type": "int",
"help": "Number of times to iterate over the dataset.",
},
"max_steps": {
"type": "int",
"help": "Maximum number of iterations to train the model for.",
"default": 500,
},
"learning_rate": {
"type": "float",
"help": "Model learning rate.",
"default": 0.0001,
},
"export_format": {
"type": "str",
"help": "File format expected at inference time.",
"default": "json",
},
"save_checkpoints_steps": {
"type": "int",
"help": "Steps to run before saving a model checkpoint.",
"default": 100,
},
"keep_checkpoint_max": {
"type": "int",
"help": "Number of model checkpoints to keep.",
"default": 2,
},
"log_step_count_steps": {
"type": "int",
"help": "Steps to run before logging training performance.",
"default": 100,
},
"eval_steps": {
"type": "int",
"help": "Number of steps to use to evaluate the model.",
"default": 20,
},
"early_stopping_steps": {
"type": "int",
"help": "Steps with no loss decrease before stopping early.",
"default": 1000,
},
}
args.update(additional_args)
return args
def generate_files(self):
super(TFModel, self).generate_files(
"tf_task.py", "tf_model.py", "tf_inputs.py")
# TODO(humichael): Support multiple model dirs.
def train(self, tune=False, blocking=True, wait_interval=60):
"""Removes any previous checkpoints before training."""
if tf.io.gfile.exists(self.get_model_dir()):
tf.gfile.DeleteRecursively(self.get_model_dir())
return super(TFModel, self).train(tune, blocking, wait_interval)
def get_deploy_framework(self):
return "TENSORFLOW"
def _get_deployment_dir(self, job_id):
"""Returns the GCS path to the Sklearn exported model.
Args:
job_id: a CAIP job id.
"""
best_trial = self._get_best_trial(job_id)
output_path = os.path.join(
self.get_model_dir(), best_trial, "export", "export")
        return subprocess.check_output(
            ["gsutil", "ls", output_path]).decode().strip().split("\n")[-1]
class XGBoostModel(BaseModel):
"""XGBoost class."""
def __init__(self, config):
super(XGBoostModel, self).__init__(config, "xgboost")
def _get_default_input_args(self, train_path, eval_path):
args = super(XGBoostModel, self)._get_default_input_args(
train_path, eval_path)
additional_args = {
"max_depth": {
"type": "int",
"help": "Maximum depth of the XGBoost tree.",
"default": 3,
},
"n_estimators": {
"type": "int",
"help": "Number of estimators to be created.",
"default": 2,
},
"booster": {
"type": "str",
"help": "which booster to use: gbtree, gblinear or dart.",
"default": "gbtree",
},
"min_child_weight": {
"type": "int",
"help": ("Minimum sum of instance weight (hessian) needed in a "
"child."),
"default": 1,
},
"learning_rate": {
"type": "float",
"help": ("Step size shrinkage used in update to prevents "
"overfitting."),
"default": 0.3,
},
"gamma": {
"type": "int",
"help": ("Minimum loss reduction required to make a further "
"partition on a leaf node of the tree."),
"default": 0,
},
"subsample": {
"type": "int",
"help": "Subsample ratio of the training instances.",
"default": 1,
},
"colsample_bytree": {
"type": "int",
"help": ("subsample ratio of columns when constructing each "
"tree."),
"default": 1,
},
"reg_alpha": {
"type": "int",
"help": ("L1 regularization term on weights. Increasing this "
"value will make model more conservative."),
"default": 0,
},
"num_classes": {
"type": "int",
"help": "Number of output labels must be in [0, num_class).",
"default": 1,
},
}
args.update(additional_args)
return args
def generate_files(self):
super(XGBoostModel, self).generate_files(
"xgboost_task.py", "xgboost_model.py", "xgboost_inputs.py")
def get_deploy_framework(self):
return "XGBOOST"
def supports_batch_predict(self):
"""Returns True if CAIP supports batch prediction for this model."""
return False
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from six import moves
import testtools
from testtools import matchers
from neutron.common import exceptions as exc
from neutron.db import api as db
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_vxlan
from neutron.tests.unit import testlib_api
TUNNEL_IP_ONE = "10.10.10.10"
TUNNEL_IP_TWO = "10.10.10.20"
TUN_MIN = 100
TUN_MAX = 109
TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
INVALID_VXLAN_VNI = 7337
VXLAN_UDP_PORT_ONE = 9999
VXLAN_UDP_PORT_TWO = 8888
class TunnelTypeTestMixin(object):
DRIVER_CLASS = None
TYPE = None
def setUp(self):
super(TunnelTypeTestMixin, self).setUp()
self.driver = self.DRIVER_CLASS()
self.driver.tunnel_ranges = TUNNEL_RANGES
self.driver.sync_allocations()
self.session = db.get_session()
def test_tunnel_type(self):
self.assertEqual(self.TYPE, self.driver.get_type())
def test_validate_provider_segment(self):
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: 'phys_net',
api.SEGMENTATION_ID: None}
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
segment[api.PHYSICAL_NETWORK] = None
self.driver.validate_provider_segment(segment)
segment[api.SEGMENTATION_ID] = 1
self.driver.validate_provider_segment(segment)
def test_sync_tunnel_allocations(self):
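        # IDs just outside the configured range have no allocation rows;
        # IDs inside the range exist but start out unallocated.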
self.assertIsNone(
self.driver.get_allocation(self.session, (TUN_MIN - 1)))
self.assertFalse(
self.driver.get_allocation(self.session, (TUN_MIN)).allocated)
self.assertFalse(
self.driver.get_allocation(self.session, (TUN_MIN + 1)).allocated)
self.assertFalse(
self.driver.get_allocation(self.session, (TUN_MAX - 1)).allocated)
self.assertFalse(
self.driver.get_allocation(self.session, (TUN_MAX)).allocated)
self.assertIsNone(
self.driver.get_allocation(self.session, (TUN_MAX + 1)))
self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES
self.driver.sync_allocations()
self.assertIsNone(
self.driver.get_allocation(self.session, (TUN_MIN + 5 - 1)))
self.assertFalse(
self.driver.get_allocation(self.session, (TUN_MIN + 5)).allocated)
self.assertFalse(
self.driver.get_allocation(self.session,
(TUN_MIN + 5 + 1)).allocated)
self.assertFalse(
self.driver.get_allocation(self.session,
(TUN_MAX + 5 - 1)).allocated)
self.assertFalse(
self.driver.get_allocation(self.session, (TUN_MAX + 5)).allocated)
self.assertIsNone(
self.driver.get_allocation(self.session, (TUN_MAX + 5 + 1)))
def test_partial_segment_is_partial_segment(self):
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: None}
self.assertTrue(self.driver.is_partial_segment(segment))
def test_specific_segment_is_not_partial_segment(self):
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: 101}
self.assertFalse(self.driver.is_partial_segment(segment))
def test_reserve_provider_segment_full_specs(self):
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: 101}
observed = self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_allocation(self.session,
observed[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
with testtools.ExpectedException(exc.TunnelIdInUse):
self.driver.reserve_provider_segment(self.session, segment)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_allocation(self.session,
observed[api.SEGMENTATION_ID])
self.assertFalse(alloc.allocated)
segment[api.SEGMENTATION_ID] = 1000
observed = self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_allocation(self.session,
observed[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_allocation(self.session,
observed[api.SEGMENTATION_ID])
self.assertIsNone(alloc)
def test_reserve_provider_segment(self):
tunnel_ids = set()
specs = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: None}
for x in moves.xrange(TUN_MIN, TUN_MAX + 1):
segment = self.driver.reserve_provider_segment(self.session,
specs)
self.assertEqual(self.TYPE, segment[api.NETWORK_TYPE])
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
with testtools.ExpectedException(exc.NoNetworkAvailable):
segment = self.driver.reserve_provider_segment(self.session,
specs)
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: tunnel_ids.pop()}
self.driver.release_segment(self.session, segment)
segment = self.driver.reserve_provider_segment(self.session, specs)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
for tunnel_id in tunnel_ids:
segment[api.SEGMENTATION_ID] = tunnel_id
self.driver.release_segment(self.session, segment)
def test_allocate_tenant_segment(self):
tunnel_ids = set()
for x in moves.xrange(TUN_MIN, TUN_MAX + 1):
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
segment = self.driver.allocate_tenant_segment(self.session)
self.assertIsNone(segment)
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: tunnel_ids.pop()}
self.driver.release_segment(self.session, segment)
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
for tunnel_id in tunnel_ids:
segment[api.SEGMENTATION_ID] = tunnel_id
self.driver.release_segment(self.session, segment)
class VxlanTypeTest(TunnelTypeTestMixin, testlib_api.SqlTestCase):
DRIVER_CLASS = type_vxlan.VxlanTypeDriver
TYPE = p_const.TYPE_VXLAN
def test_endpoints(self):
        # Add the first endpoint and verify its IP address and UDP port
vxlan1_endpoint = self.driver.add_endpoint(TUNNEL_IP_ONE,
VXLAN_UDP_PORT_ONE)
self.assertEqual(TUNNEL_IP_ONE, vxlan1_endpoint.ip_address)
self.assertEqual(VXLAN_UDP_PORT_ONE, vxlan1_endpoint.udp_port)
        # Add the second endpoint and verify its IP address and UDP port
vxlan2_endpoint = self.driver.add_endpoint(TUNNEL_IP_TWO,
VXLAN_UDP_PORT_TWO)
self.assertEqual(TUNNEL_IP_TWO, vxlan2_endpoint.ip_address)
self.assertEqual(VXLAN_UDP_PORT_TWO, vxlan2_endpoint.udp_port)
# Get all the endpoints
endpoints = self.driver.get_endpoints()
for endpoint in endpoints:
if endpoint['ip_address'] == TUNNEL_IP_ONE:
self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint['udp_port'])
elif endpoint['ip_address'] == TUNNEL_IP_TWO:
self.assertEqual(VXLAN_UDP_PORT_TWO, endpoint['udp_port'])
def test_add_same_ip_endpoints(self):
self.driver.add_endpoint(TUNNEL_IP_ONE, VXLAN_UDP_PORT_ONE)
with mock.patch.object(type_vxlan.LOG, 'warning') as log_warn:
observed = self.driver.add_endpoint(TUNNEL_IP_ONE,
VXLAN_UDP_PORT_TWO)
self.assertEqual(VXLAN_UDP_PORT_ONE, observed['udp_port'])
log_warn.assert_called_once_with(mock.ANY, TUNNEL_IP_ONE)
class TunnelTypeMultiRangeTestMixin(object):
DRIVER_CLASS = None
TUN_MIN0 = 100
TUN_MAX0 = 101
TUN_MIN1 = 200
TUN_MAX1 = 201
TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)]
def setUp(self):
super(TunnelTypeMultiRangeTestMixin, self).setUp()
self.driver = self.DRIVER_CLASS()
self.driver.tunnel_ranges = self.TUNNEL_MULTI_RANGES
self.driver.sync_allocations()
self.session = db.get_session()
def test_release_segment(self):
segments = [self.driver.allocate_tenant_segment(self.session)
for i in range(4)]
# Release them in random order. No special meaning.
for i in (0, 2, 1, 3):
self.driver.release_segment(self.session, segments[i])
for key in (self.TUN_MIN0, self.TUN_MAX0,
self.TUN_MIN1, self.TUN_MAX1):
alloc = self.driver.get_allocation(self.session, key)
self.assertFalse(alloc.allocated)
class VxlanTypeMultiRangeTest(TunnelTypeMultiRangeTestMixin,
testlib_api.SqlTestCase):
DRIVER_CLASS = type_vxlan.VxlanTypeDriver
|
|
from __future__ import annotations
import io
from typing import Union, Optional, List
from elma.lgr import LGR, LGR_Image
from elma.constants import LGR_MANDATORY_FILES
from elma.constants import LGR_LIMITED_SIZE_FILES
__all__ = ["check_LGR_error"]
LGR_PCX_MIN = 10  # unused: practically never relevant
LGR_PCX_MAX = 3500  # unused: practically never relevant
LGR_PCX_FILESIZE_MIN = 1  # unused: practically never relevant
LGR_PCX_FILESIZE_MAX = 10000000
LGR_PIC_MAX = 999
LGR_PIC_WIDTH_MAX = 6000
LGR_PIC_SIZE_MAX = 600000
LGR_TEX_MIN = 2
LGR_TEX_MAX = 99
LGR_MASK_MAX = 199
LGR_GRASS_MAX = 99
LGR_OBJ_WIDTH = 40
LGR_OBJ_HEIGHT = 40
LGR_OBJ_FRAMES_MAX = 1000
LGR_RECOMMEND_BIKE_WIDTH = 354
LGR_RECOMMEND_BIKE_HEIGHT = 298
LGR_WARNING_GRASS_HEIGHT_MIN = 41
LGR_WARNING_QCOLORS_WIDTH = 66
LGR_WARNING_QCOLORS_HEIGHT = 109
ERR_LGR_INVALID_PALETTE = 1
ERR_LGR_MISSING_MANDATORY_FILE = 2
ERR_DUPLICATE_NAME = 3
ERR_TOO_MANY_GRASS = 4
ERR_TOO_MANY_TEXTURES = 5
ERR_NOT_ENOUGH_TEXTURES = 6
ERR_TOO_MANY_PICTURES = 7
ERR_TOO_MANY_MASKS = 8
WARN_UNUSED_QFOOD = 501
ERR_FILE_TOO_LARGE = 1001
ERR_NAME_TOO_LONG = 1002
ERR_NAME_MISSING = 1003
ERR_PADDING_INVALID = 1004
ERR_IMAGE_TYPE_INVALID = 1005
ERR_DISTANCE_INVALID = 1006
ERR_TRANSPARENCY_INVALID = 1007
ERR_PIC_TOO_WIDE = 1008
ERR_PIC_TOO_MANY_PIXELS = 1009
ERR_OBJ_WIDTH_INVALID = 1010
ERR_OBJ_TOO_WIDE = 1011
ERR_SMALL_IMAGE_TOO_LARGE = 1012
ERR_IMAGE_INVALID_PALETTE = 1013
ERR_IMG_MISSING = 1014
ERR_CLIPPING_INVALID = 1015
WARN_OBJ_HEIGHT_INVALID = 5001
WARN_GRASS_HEIGHT_TOO_SMALL = 5002
WARN_PALETTE_MISMATCH = 5003
WARN_QCOLORS_WRONG_SIZE = 5004
WARN_QBIKE_TOO_SMALL = 5005
def check_LGR_error(lgro: Union[LGR, LGR_Image], palette: Optional[List[int]] = None) -> List[List]:
"""
Returns a list of errors or warnings for an LGR object or LGR_Image object.
    If you pass an LGR_Image object, you can also pass the palette that the
    object is expected to use.
"""
message: List[List] = []
if isinstance(lgro, LGR):
use_palette = None
if(len(lgro.palette) == 768 and
max(lgro.palette) <= 255 and
min(lgro.palette) >= 0):
use_palette = lgro.palette
else:
message.append([
ERR_LGR_INVALID_PALETTE,
None,
"The LGR file has an invalid palette! Please set a palette "
"using get_palette() on one of the images or using "
"LGR_Image.default_palette()"])
n_pic = 0
n_tex = 0
n_mask = 0
n_grass = 0
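        # apples[i] records whether qfood(i+1) is present, used to warn about gaps in the qfood sequence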
apples = [False] * 9
for item in LGR_MANDATORY_FILES:
n_count = 0
for obj in lgro.images:
if item == obj.name.lower():
n_count = 1
break
if n_count == 0:
message.append([
ERR_LGR_MISSING_MANDATORY_FILE,
item,
"The LGR file is missing a mandatory file: %s" % item])
len_lgr = len(lgro.images)
for i in range(len_lgr):
namelower = lgro.images[i].name.lower()
if lgro.images[i].is_in_pictures_lst():
if lgro.images[i].is_qup_qdown():
n_grass += 1
elif lgro.images[i].is_food():
apples[int(namelower[5:6])-1] = True
elif lgro.images[i].image_type == LGR_Image.PICTURE:
n_pic += 1
elif lgro.images[i].image_type == LGR_Image.TEXTURE:
n_tex += 1
elif lgro.images[i].image_type == LGR_Image.MASK:
n_mask += 1
else:
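                # qgrass is not listed in pictures.lst but still counts as a texture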
if namelower == "qgrass":
n_tex += 1
for j in range(i+1, len_lgr, 1):
if namelower == lgro.images[j].name.lower():
message.append([
ERR_DUPLICATE_NAME,
lgro.images[j],
"The LGR file has a duplicate of the "
"following filename: %s" % lgro.images[j].name])
message_temp = check_LGR_error(lgro.images[i], use_palette)
if message_temp:
message.extend(message_temp)
if n_grass > LGR_GRASS_MAX:
message.append([
ERR_TOO_MANY_GRASS,
n_grass,
"The LGR file has %s grass images but "
"can only support %s at most" % (n_grass, LGR_GRASS_MAX)])
if n_pic > LGR_PIC_MAX:
message.append([
ERR_TOO_MANY_PICTURES,
n_pic,
"The LGR file has %s picture images but "
"can only support %s at most" % (n_pic, LGR_PIC_MAX)])
if n_tex < LGR_TEX_MIN:
message.append([
ERR_NOT_ENOUGH_TEXTURES,
n_tex,
"The LGR file has %s texture images but "
"needs at least %s" % (n_tex, LGR_TEX_MIN)])
elif n_tex > LGR_TEX_MAX:
message.append([
ERR_TOO_MANY_TEXTURES,
n_tex,
"The LGR file has %s texture image(s) but "
"can only support %s at most" % (n_tex, LGR_TEX_MAX)])
if n_mask > LGR_MASK_MAX:
message.append([
ERR_TOO_MANY_MASKS,
n_mask,
"The LGR file has %s mask images but "
"can only support %s at most" % (n_mask, LGR_MASK_MAX)])
        appleFinished = False
        for i in range(9):
            if apples[i]:
                if appleFinished:
                    message.append([
                        WARN_UNUSED_QFOOD,
                        None,
                        "Warning: qfood%s will not appear in "
                        "the game as an earlier qfood is missing" % (i+1,)])
            else:
                appleFinished = True
return message
elif isinstance(lgro, LGR_Image):
is_mask = False
len_name = len(lgro.name)
if len_name > 8:
message.append([
ERR_NAME_TOO_LONG,
lgro,
("The name of %s is too long "
"(maximum 8 characters)") % lgro.name])
if len_name == 0:
message.append([
ERR_NAME_MISSING,
lgro,
"An LGR_Image has no name! %s" % lgro])
try:
if len(bytes(lgro.padding)) != 7:
message.append([
ERR_PADDING_INVALID,
lgro,
"%s's padding is invalid (must be an "
"array of 7 ints in range (0-255))" % lgro.name])
except(ValueError, TypeError):
message.append([
ERR_PADDING_INVALID,
lgro,
"%s's padding is invalid (must be an "
"array of 7 ints in range (0-255))" % lgro.name])
namelower = lgro.name.lower()
if lgro.is_object():
if lgro.img.height != LGR_OBJ_HEIGHT:
message.append([
WARN_OBJ_HEIGHT_INVALID,
lgro,
"Warning: %s should have a height of %s, "
"but the height is %s. If the height is "
"smaller, the program might crash unexpectedly. "
"If the height is larger, the additional columns "
"will be ignored." % (
lgro.name,
LGR_OBJ_HEIGHT,
lgro.img.height)])
if lgro.img.width % LGR_OBJ_WIDTH != 0:
message.append([
ERR_OBJ_WIDTH_INVALID,
lgro,
"As %s is an object, the width (%s) must be a "
"multiple of %s" % (
lgro.name,
lgro.img.width,
LGR_OBJ_WIDTH)])
if lgro.img.width > LGR_OBJ_WIDTH*LGR_OBJ_FRAMES_MAX:
message.append([
ERR_OBJ_TOO_WIDE,
lgro,
"%s must have a width at most %s, "
"but the width is %s" % (
lgro.name,
lgro.img.width,
LGR_OBJ_WIDTH * LGR_OBJ_FRAMES_MAX)])
elif lgro.is_in_pictures_lst():
if lgro.is_qup_qdown():
if lgro.img.height < LGR_WARNING_GRASS_HEIGHT_MIN:
message.append([
WARN_GRASS_HEIGHT_TOO_SMALL,
lgro,
"Warning: %s should have a minimum "
"height of %s, but the height is %s" % (
lgro.name,
LGR_WARNING_GRASS_HEIGHT_MIN,
lgro.img.height)])
else:
                if lgro.image_type not in (
                        LGR_Image.PICTURE, LGR_Image.TEXTURE, LGR_Image.MASK):
message.append([
ERR_IMAGE_TYPE_INVALID,
lgro,
"%s's image_type is invalid" % lgro.name])
else:
if lgro.image_type == LGR_Image.PICTURE:
if lgro.img.width > LGR_PIC_WIDTH_MAX:
message.append([
ERR_PIC_TOO_WIDE,
lgro,
"%s is too wide (%s) - the maximum is %s" %
(lgro.name,
lgro.img.width,
LGR_PIC_WIDTH_MAX)])
if(lgro.img.width * lgro.img.height >
LGR_PIC_SIZE_MAX):
message.append([
ERR_PIC_TOO_MANY_PIXELS,
lgro,
"%s has too many pixels (%s) - the "
"maximum is %s. Depending on the image, "
"the game might crash" % (
lgro.name,
lgro.img.width * lgro.img.height,
LGR_PIC_SIZE_MAX)])
elif lgro.image_type == LGR_Image.MASK:
is_mask = True
if not(1 <= lgro.default_distance <= 999) and not is_mask:
message.append([
ERR_DISTANCE_INVALID,
lgro,
"%s's distance (%s) is invalid "
"(must be integer between 1-999)" % (
lgro.name,
lgro.default_distance)])
if not(0 <= lgro.default_clipping <= 2) and not is_mask:
message.append([
ERR_CLIPPING_INVALID,
lgro,
"%s's clipping (%s) is invalid" % (
lgro.name,
lgro.default_clipping)])
if not(11 <= lgro.transparency <= 15):
message.append([
ERR_TRANSPARENCY_INVALID,
lgro,
"%s's transparency (%s) is invalid" % (
lgro.name,
lgro.transparency)])
if lgro.img:
with io.BytesIO() as f:
lgro.save_PCX(f)
size = f.tell()
if size > LGR_PCX_FILESIZE_MAX:
message.append([
ERR_FILE_TOO_LARGE,
lgro,
"The file produced by %s is %s bytes in size, "
"over the limit of %s" % (
lgro.name,
size,
LGR_PCX_FILESIZE_MAX)])
            if(namelower in LGR_LIMITED_SIZE_FILES and
               (lgro.img.width > 255 or lgro.img.height > 255)):
message.append([
ERR_SMALL_IMAGE_TOO_LARGE,
lgro,
"%s must have dimensions at most 255x255, "
"but the dimensions are %sx%s" % (
lgro.name,
lgro.img.width,
lgro.img.height)])
if lgro.is_valid_palette_image():
if palette and not is_mask and lgro.get_palette() != palette:
message.append([
WARN_PALETTE_MISMATCH,
lgro,
("Warning: %s's palette does not "
"match the LGR's palette!") % lgro.name])
else:
message.append([
ERR_IMAGE_INVALID_PALETTE,
lgro,
"%s has an invalid palette" % lgro.name])
if namelower == "qcolors":
            if(lgro.img.width != LGR_WARNING_QCOLORS_WIDTH or
               lgro.img.height != LGR_WARNING_QCOLORS_HEIGHT):
message.append([
WARN_QCOLORS_WRONG_SIZE,
lgro,
"Warning: qcolors usually has dimensions of %sx%s, "
"but the dimensions are %sx%s" % (
LGR_WARNING_QCOLORS_WIDTH,
LGR_WARNING_QCOLORS_HEIGHT,
lgro.img.width,
lgro.img.height)])
if namelower == "q1bike" or namelower == "q2bike":
if(lgro.img.width < LGR_RECOMMEND_BIKE_WIDTH or
lgro.img.height < LGR_RECOMMEND_BIKE_HEIGHT):
message.append([
WARN_QBIKE_TOO_SMALL,
lgro,
"Warning: %s needs dimensions of at least %sx%s to be "
"rendered properly, but the dimensions are %sx%s" % (
lgro.name,
LGR_RECOMMEND_BIKE_WIDTH,
LGR_RECOMMEND_BIKE_HEIGHT,
lgro.img.width,
lgro.img.height)])
else:
message.append([
ERR_IMG_MISSING,
lgro,
"%s has no image!" % lgro.name])
else:
raise ValueError("only LGR and LGR_Image objects can be "
"evaluated with this function")
return message
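# A hedged usage sketch (not part of the original module): one way the
# [code, context, message] entries produced by check_LGR_error() could be
# reported. It only relies on the convention, visible above, that every
# warning message starts with "Warning:".
def report_LGR_errors(lgro):
    """Print each issue found in lgro together with its severity."""
    for code, _context, text in check_LGR_error(lgro):
        severity = "WARNING" if text.startswith("Warning:") else "ERROR"
        print("[%s %4d] %s" % (severity, code, text))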
|
|
"""
Analyse picoscope data.
"""
from __future__ import print_function, division
import sys
import logging
import warnings
from os.path import isfile, isdir, abspath, split
from argparse import (ArgumentTypeError, ArgumentParser,
RawDescriptionHelpFormatter)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from getfilelist import get_file_list
CMD_EXAMPLES = """
EXAMPLES
$ ./picanalysis -f disp /eos/project/l/lhcbgv/data/L0Trigger/2017/dataset/
$ ./picanalysis -f min /eos/project/l/lhcbgv/data/L0Trigger/2017/dataset/
$ ./picanalysis -f dtmin -t C -r 8.5 -d 0.2 /eos/project/l/lhcbgv/data/L0Trigger/2017/dataset/
$ ./picanalysis -f dtmin -t C -r 1.75 -d 0.2 /eos/project/l/lhcbgv/data/L0Trigger/2017/dataset/
"""
CH_COL = {'A': (0, 0, 0, .05), 'B': (0, 0, 1, .05),
'C': (1, 0, 0, .05), 'D': (0, 1, 0, .05)}
CH_COL2 = {'A': (0, 0, 0), 'B': (0, 0, 1),
'C': (1, 0, 0), 'D': (0, 1, 0)}
KWARGS = {'tick_size': {'labelsize': 13},
'lbl_size': {'fontsize': 13},
'subplots_adjust': {'right': .95, 'left': .05}
}
NUM_VALS_BASE = 10
# CH_HANDLES = {k: Patch(color=CH_COL2[k]) for k in CH_COL.keys()}
LOAD_KW_CSV = {'delimiter': ',', 'skiprows': 3, 'unpack': True}
warnings.simplefilter("error", RuntimeWarning)
LOG_MSG_FMT = "[%(asctime)s][%(levelname)-8s]\
[%(filename)s, %(lineno)d][%(name)s]\t%(message)s"
LOG_DT_FMT = "\033[1m%m-%d %H:%M:%S\033[0m"
def set_logging(verbosity, msg_fmt=LOG_MSG_FMT, dt_fmt=LOG_DT_FMT):
"""Make logging ouput pretty."""
logging.addLevelName(logging.DEBUG, "\033[1;34m%-8s\033[1;0m"
% logging.getLevelName(logging.DEBUG))
logging.addLevelName(logging.INFO, "\033[1;37m%-8s\033[1;0m"
% logging.getLevelName(logging.INFO))
logging.addLevelName(logging.WARNING, "\033[1;33m%-8s\033[1;0m"
% logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.ERROR, "\033[1;31m%-8s\033[1;0m"
% logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.CRITICAL, "\033[1;41m%-8s\033[1;0m"
% logging.getLevelName(logging.CRITICAL))
    logging.basicConfig(level=verbosity, format=msg_fmt,
                        datefmt=dt_fmt)
class PicoAnalysis(object):
"""A class for the analysis of data received with the picoscope.
"""
def __init__(self):
"""Initialize the data structures.
"""
self.hists = {} # Histogram data
self.signals = {} # Full signals
self.header = {} # Header
self.inferrs = {} # infinity errors
self.threserrs = {} # threshold errors
def reset(self):
self.hists = {}
self.signals = {}
def get_csv_data(self, fn, **kwargs):
"""Get data from a picoscope csv file. This function also
provides the possibility for
"""
# TODO - some more arguments here for more functionality
# shift = kwargs.get('shift', 0)
# absol = kwargs.get('abs', False)
hdr = {}
with open(fn, 'r') as fp:
hdr['titles'] = fp.readline().strip().split(',')
hdr['units'] = fp.readline().strip().split(',')
try:
data = np.loadtxt(fn, **LOAD_KW_CSV)
except (StopIteration, ValueError):
logger.warning('Problem in %s' % split(fn)[1])
return None
for t, d in zip(hdr['titles'][1:], data[1:]):
            if np.isnan(d).any():
logger.warning('%d NaN values encountered in %s'
% (d[np.isnan(d)].size, t))
return hdr, data
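    # Assumed CSV layout (inferred from the two readline() calls and
    # LOAD_KW_CSV above, not from a PicoScope specification): line 1 holds
    # the column titles, line 2 the units, line 3 is skipped (skiprows=3)
    # and the remaining comma-separated rows are the samples, e.g.
    #
    #   Time,Channel A,Channel B
    #   (ms),(mV),(mV)
    #
    #   -0.500,-1.2,-0.8
    #   -0.499,-1.3,-0.9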
#######################
# Transient functions #
#######################
def func_min_trans(self, tpdata):
"""Return the minimum value of the signal for each channel.
"""
hdr, data = tpdata
for t, dat in zip(hdr['titles'][1:], data[1:]):
tmp = self.hists.get(t, [])
r = np.min(dat)
if np.isinf(r):
terr = self.inferrs.get(t, 0)
terr += 1
self.inferrs[t] = terr
continue
tmp.append(r)
self.hists[t] = tmp
def func_max_trans(self, tpdata):
"""Return the maximum value of the signal for each channel.
"""
hdr, data = tpdata
for t, dat in zip(hdr['titles'][1:], data[1:]):
tmp = self.hists.get(t, [])
r = np.max(dat)
if np.isinf(r):
terr = self.inferrs.get(t, 0)
terr += 1
self.inferrs[t] = terr
                continue
tmp.append(r)
self.hists[t] = tmp
def func_sum_trans(self, tpdata):
"""Return the sum value of the signal for each channel.
"""
hdr, data = tpdata
for t, dat in zip(hdr['titles'][1:], data[1:]):
tmp = self.hists.get(t, [])
r = np.sum(dat)
if np.isinf(r):
terr = self.inferrs.get(t, 0)
terr += 1
self.inferrs[t] = terr
                continue
tmp.append(r)
self.hists[t] = tmp
def func_sum2_trans(self, tpdata, **kwargs):
"""Return the absolute sum value of the signal for each channel.
Base is also calculated / given in case the signal is shifted.
"""
base = kwargs['base']
hdr, data = tpdata
for t, dat in zip(hdr['titles'][1:], data[1:]):
if base is None:
b = np.mean(dat[:NUM_VALS_BASE])
else:
b = base
ndat = dat - b
tmp = self.hists.get(t, [])
tmp.append(np.sum(np.absolute(ndat)))
self.hists[t] = tmp
def func_ttmin_trans(self, tpdata, **kwargs):
"""Return something.
"""
base = kwargs['base']
cfd = kwargs['cfd']
thres = kwargs['threshold']
hdr, data = tpdata
x = data[0]
for t, dat in zip(hdr['titles'][1:], data[1:]):
dat = np.asarray(dat)
if base is None:
b = np.mean(dat[:NUM_VALS_BASE])
else:
b = base
try:
minim = np.min(dat)
if np.isinf(minim):
terr = self.inferrs.get(t, 0)
terr += 1
self.inferrs[t] = terr
return
if minim > kwargs['threshold']:
terr = self.threserrs.get(t, 0)
terr += 1
self.threserrs[t] = terr
return
thres = b - abs(minim-b)*cfd
tfin = x[np.where(dat <= thres)[0][0]]
tmp = self.hists.get(t, [])
tmp.append(tfin)
self.hists[t] = tmp
            except IndexError:
                logging.error('BASE=%f, MEAN=%f' % (b, np.mean(dat)))
                logging.error('BASE=%s' % (dat[:NUM_VALS_BASE]))
                logging.error('MIN=%f, THRES=%f' % (minim, thres))
                logging.error(np.where(dat <= thres))
                try:
                    self.inferrs[t] += 1
                except KeyError:
                    self.inferrs[t] = 1
                raise
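    # Worked example of the constant-fraction threshold used above (numbers
    # are illustrative): with baseline b = 0.0 V, minimum = -1.0 V and
    # cfd = 0.2, the crossing level is thres = b - abs(minim - b)*cfd
    # = -0.2 V, and the recorded trigger time is the time of the first
    # sample at or below -0.2 V.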
def func_dtmin_trans(self, tpdata, **kwargs):
"""Return something.
"""
base = kwargs['base']
cfd = kwargs['cfd']
trg_ch = kwargs['trg_ch']
thres = kwargs['threshold']
hdr, data = tpdata
x = data[0]
channels = [c.split(' ')[1] for c in hdr['titles'][1:]]
tindx = channels.index(trg_ch) + 1
# First this for the triggering channel
dat = np.asarray(data[tindx])
if base is None:
b = np.mean(dat[:NUM_VALS_BASE])
else:
b = base
minim = np.min(dat)
if np.isinf(minim):
terr = self.inferrs.get(hdr['titles'][tindx], 0)
terr += 1
self.inferrs[hdr['titles'][tindx]] = terr
return
thres = b - abs(minim-b)*cfd
trg_ch_tfin = x[np.where(dat <= thres)[0][0]]
for t, dat in zip(hdr['titles'][1:], data[1:]):
dat = np.asarray(dat)
if base is None:
b = np.mean(dat[:NUM_VALS_BASE])
else:
b = base
try:
minim = np.min(dat)
if np.isinf(minim):
terr = self.inferrs.get(t, 0)
terr += 1
self.inferrs[t] = terr
return
if minim > kwargs['threshold']:
terr = self.threserrs.get(t, 0)
terr += 1
self.threserrs[t] = terr
return
thres = b - abs(minim-b)*cfd
tfin = x[np.where(dat <= thres)[0][0]]
tmp = self.hists.get(t, [])
tmp.append(tfin-trg_ch_tfin)
self.hists[t] = tmp
            except IndexError:
                logging.error('BASE=%f, MEAN=%f' % (b, np.mean(dat)))
                logging.error('BASE=%s' % (dat[:NUM_VALS_BASE]))
                logging.error('MIN=%f, THRES=%f' % (minim, thres))
                logging.error(np.where(dat <= thres))
                try:
                    self.inferrs[t] += 1
                except KeyError:
                    self.inferrs[t] = 1
                raise
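    # Note: the histogrammed quantity above is tfin - trg_ch_tfin, i.e. the
    # constant-fraction trigger time of each channel minus that of the
    # channel selected with --trg-ch, so the trigger channel itself fills a
    # bin at (or very close to) zero.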
#####################
# Parsing functions #
#####################
def parse_files_hist(self, filelist, func, **kwargs):
totf = len(filelist)
logger.info('Reading %s files...' % totf)
# getting header of initial file, so that if different -> warning
self.header, _ = self.get_csv_data(filelist[0])
logging.debug("Header of first file: %s" % self.header)
logging.debug("File list length: %d" % len(filelist))
logging.debug("Function to be used: %s" % func.__name__)
logging.debug("Keywords: %s" % kwargs)
for numf, f in enumerate(filelist):
status = "Progress: %s / %s \r" % (numf+1, totf)
print(status, end='')
tpdata = self.get_csv_data(f) # transient picoscope data
head = tpdata[0]
if head['titles'] != self.header['titles']:
logger.warning("Header titles mismatch: %s" % split(f)[1])
if head['units'] != self.header['units']:
logger.warning("Header units mismatch %s" % split(f)[1])
func(tpdata, **kwargs) # getting min of every channel
print()
if any(self.inferrs.values()):
logger.warning('Infinity errors encountered: %s' % self.inferrs)
if any(self.threserrs.values()):
logger.warning('Threshold errors encountered: %s' % self.threserrs)
def parse_signal_disp(self, filelist):
totf = len(filelist)
logger.info('Reading %s files...' % totf)
# getting header of initial file, so that if different -> warning
self.header, data = self.get_csv_data(filelist[0])
# x_init = data[0]
for numf, f in enumerate(filelist):
status = "Progress: %s / %s \r" % (numf+1, totf)
print(status, end='')
tpdata = self.get_csv_data(f) # transient picoscope data
head = tpdata[0]
if head['titles'] != self.header['titles']:
logger.warning("Header titles mismatch: %s" % split(f)[1])
if head['units'] != self.header['units']:
logger.warning("Header units mismatch %s" % split(f)[1])
hdr, data = tpdata
x = data[0]
for t, dat in zip(hdr['titles'][1:], data[1:]):
tmp = self.signals.get(t, [])
tmp.append(np.array([x, dat]))
self.signals[t] = tmp
print()
#####################
# Plotter Functions #
#####################
def plot_hist(self, datapack, **kwargs):
bins = kwargs.get('bins', 20)
figtitle = kwargs.get('figtitle', 'Histograms')
figsize = kwargs.get('figsize', (20, 20))
lims = kwargs.get('lims', None)
xl = "%s, %s" % (self.header['titles'][0], self.header['units'][0])
xlbl = kwargs.get('xlbl', xl)
ylbl = kwargs.get('ylbl', '')
norm = kwargs.get('norm', False)
f = plt.figure(figtitle, figsize=figsize)
f.subplots_adjust(right=0.95, left=0.05)
axes = []
maxax = len(datapack)
for i, (key, value) in enumerate(datapack.iteritems()):
ax = f.add_subplot(int('1%s%s' % (maxax, i+1)))
ax.set_title(key)
ax.set_xlabel(xlbl, **KWARGS['lbl_size'])
ax.set_ylabel(ylbl, **KWARGS['lbl_size'])
ax.tick_params(**KWARGS['tick_size'])
ax.grid(True, lw=2)
value = np.asarray(value)
value = value[~np.isnan(value)]
if lims is not None:
l, h = lims
value = value[(value > l) & (value < h)]
ax.hist(value, bins=bins, fc=(0, 0, 0, .7), normed=norm)
#if lims is not None:
# ax.set_xlim(*lims)
#"""
l = ["Entries = %d" % len(value),
"Mean = %.2f" % np.mean(value), "Std = %.2f" % np.std(value),
"Max = %.2f" % np.max(value), "Min = %.2f" % np.min(value)]
if lims is not None:
l.append('Lims: %s' % str(lims))
h = [Patch(color='w') for _ in l]
ax.legend(h, l, loc=2, numpoints=1, framealpha=.7, fancybox=True)
"""
st = (np.mean(value), np.std(value), np.max(value), np.min(value))
lbl = 'Mean = %.2f\nStd = %.2f\nMax = %.2f\nMin = %.2f' % st
bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=.8)
ax.annotate(lbl, xy=(1.1, 1.1), xycoords='axes fraction',
size=14, ha='right', va='top', bbox=bbox_props)
"""
axes.append(ax)
plt.show()
def plot_signal(self, datapack, **kwargs):
figtitle = kwargs.get('figtitle', 'Histograms')
figsize = kwargs.get('figsize', (10, 10))
xlbl = kwargs.get('xlbl', 'Time, '+self.header['units'][0])
        # TODO - Warning - what if ranges / units differ per channel?
ylbl = kwargs.get('ylbl', 'Amplitude, '+self.header['units'][1])
f = plt.figure(figtitle, figsize=figsize)
ax = f.add_subplot(111)
ax.set_xlabel(xlbl, **KWARGS['lbl_size'])
ax.set_ylabel(ylbl, **KWARGS['lbl_size'])
ax.tick_params(**KWARGS['tick_size'])
ax.grid(True, lw=2)
logger.info('Plotting...')
h, l = [], []
for i, (key, dp) in enumerate(datapack.iteritems()):
col = CH_COL[key.split(' ')[1]]
pcol = CH_COL2[key.split(' ')[1]]
h.append(Patch(color=pcol))
l.append(key)
for d in dp:
x, y = d
ax.plot(x, y, c=col)
ax.legend(h, l, framealpha=.8, fancybox=True, loc=4,
prop={'size': 17})
plt.show()
def hist_trigger_time_min(self, filelist, **kwargs):
logger.debug(kwargs)
bins = kwargs['bins']
norm = kwargs['norm']
self.hists = {}
self.inferrs = {}
totf = len(filelist)
logger.info('Reading %s files...' % totf)
# getting header of initial file, so that if different -> warning
self.header, _ = self.get_csv_data(filelist[0])
for numf, f in enumerate(filelist):
status = "Progress: %s / %s \r" % (numf+1, totf)
print(status, end='')
tpdata = self.get_csv_data(f) # transient picoscope data
head = tpdata[0]
if head['titles'] != self.header['titles']:
logger.warning("Header titles mismatch: %s" % split(f)[1])
if head['units'] != self.header['units']:
logger.warning("Header units mismatch %s" % split(f)[1])
self.func_ttmin_trans(tpdata, **kwargs)
print()
if self.inferrs:
logging.warning('IndexErrors: %s' % self.inferrs)
self.plot_hist(self.hists, figsize=(20, 5), figtitle='trg_time_hist',
norm=norm, bins=bins)
def hist_delta_trigger_time_min(self, filelist, **kwargs):
logger.debug(kwargs)
bins = kwargs['bins']
norm = kwargs['norm']
self.hists = {}
self.inferrs = {}
totf = len(filelist)
logger.info('Reading %s files...' % totf)
# getting header of initial file, so that if different -> warning
self.header, _ = self.get_csv_data(filelist[0])
for numf, f in enumerate(filelist):
status = "Progress: %s / %s \r" % (numf+1, totf)
print(status, end='')
tpdata = self.get_csv_data(f) # transient picoscope data
head = tpdata[0]
if head['titles'] != self.header['titles']:
logger.warning("Header titles mismatch: %s" % split(f)[1])
if head['units'] != self.header['units']:
logger.warning("Header units mismatch %s" % split(f)[1])
self.func_dtmin_trans(tpdata, **kwargs)
print()
if self.inferrs:
logging.warning('IndexErrors: %s' % self.inferrs)
self.plot_hist(self.hists, figsize=(20, 5), norm=norm, bins=bins,
figtitle='trg_delta_time_hist')
def rfloat(x):
"""Check that the float is between [0, 1] as this is a percentage.
"""
x = float(x)
if x < 0.0 or x > 1.0:
raise ArgumentTypeError("%r not in range [0.0, 1.0]" % (x,))
return x
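# Illustration of rfloat() used as an argparse type: rfloat('0.2') returns
# 0.2, while rfloat('1.5') raises ArgumentTypeError, so argparse rejects
# out-of-range --cfd values with a usage error.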
if __name__ == '__main__':
#################################
# PARSER DEFINITION #
#################################
parser = ArgumentParser(description='Analyse picoscope data.',
epilog=CMD_EXAMPLES,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('paths', type=str, nargs='+',
help='The files or directories to retrieve data.')
parser.add_argument('-f', '--func', dest='function', type=str,
default='disp',
choices=['disp', 'min', 'max', 'sum', 'sum2',
'ttmin', 'ttmax', 'dtmin', 'dtmax'],
                        help='Analysis function to run; the most useful '
                             'are disp, min and dtmin.')
parser.add_argument('-y', '--type', dest='type', type=str, default='csv',
choices=['csv', 'nope'],
                        help='Type of file input. Currently only csv is \
                             supported.')
parser.add_argument('-n', '--num', dest='num', type=int, default=None,
help='Number of files to be processed if directory \
is given. If multiple directories, then num files \
from each.')
parser.add_argument('-c', '--channels', dest='channels', type=str,
default='ABCD', help='String for channel definition. \
Combinations of {ABCD} are valid.')
parser.add_argument('-b', '--bins', dest='bins', type=int,
default=50, help='Number of bins for histograms.')
parser.add_argument('-l', '--lims', dest='lims', default=None,
help='Lim values for the histogram.')
parser.add_argument('--no-norm', dest='norm', action='store_false',
default=True, help='Set the histograms not to be \
normalized (sum of hist integral=1)')
parser.add_argument('-t', '--trg-ch', dest='trg_ch', type=str,
default=None, choices=['A', 'B', 'C', 'D'],
help='Trigger Channel.')
parser.add_argument('-d', '--cfd', dest='cfd', type=rfloat, default=0.5,
                        help='Constant fraction discriminator fraction. \
                             For tt/dt functions.')
parser.add_argument('-a', '--base', dest='base', type=float, default=None,
                        help='Baseline value from which the cfd threshold \
                        will be calculated. If not set, it will be taken as \
                        the mean of the first 10 values. For tt/dt functions.')
parser.add_argument('-r', '--threshold', dest='threshold', type=float,
default=-0.05,
                        help='Threshold below which a value will be \
                        considered a valid minimum.')
parser.add_argument('-v', '--verbosity', action='count',
help='Increase output verbosity.')
args = parser.parse_args()
kw = vars(args)
paths = kw['paths']
del kw['paths']
if kw['verbosity'] is None:
verbosity = 30
elif kw['verbosity'] == 1:
verbosity = 20
else:
verbosity = 10
set_logging(verbosity)
logger = logging.getLogger(__name__)
logger.info(args)
#################################
# Starting the main part #
#################################
bins = kw['bins']
norm = kw['norm']
lims = kw['lims']
if kw['lims'] is not None:
lims = eval(kw['lims'])
lims = tuple([int(item) for item in lims])
logging.debug('Lims: %s' % str(lims))
assert len(lims) == 2 and type(lims) == tuple and lims[0] < lims[1]
logger.info('Getting filelist...')
filelist = []
for p in paths:
pn = abspath(p)
if isdir(pn):
try:
tmp = get_file_list(pn)[1]
if kw['num'] is not None:
tmp = tmp[:kw['num']]
filelist.extend(tmp)
except IndexError:
                logging.warning('IndexError in dir: %s.' % p)
                logging.warning('Directory has %d files, requested %d'
                                % (len(tmp), kw['num']))
elif isfile(pn):
filelist.append(pn)
logger.info('Done.')
if not filelist:
logger.error('No files found.')
sys.exit()
pa = PicoAnalysis()
if kw['function'] == 'disp':
pa.parse_signal_disp(filelist)
pa.plot_signal(pa.signals, figsize=(10, 10), figtitle='Signals')
elif kw['function'] == 'min':
pa.parse_files_hist(filelist, pa.func_min_trans)
pa.plot_hist(pa.hists, figsize=(20, 8), figtitle='min_val_hist',
xlbl='Minimum Value', norm=norm, bins=bins, lims=lims)
elif kw['function'] == 'max':
pa.parse_files_hist(filelist, pa.func_max_trans)
pa.plot_hist(pa.hists, figsize=(20, 8), figtitle='max_val_hist',
xlbl='Maximum Value', norm=norm, bins=bins)
elif kw['function'] == 'sum':
pa.parse_files_hist(filelist, pa.func_sum_trans)
pa.plot_hist(pa.hists, figsize=(20, 8), figtitle='sum_val_hist',
xlbl='Sum Value', norm=norm, bins=bins)
elif kw['function'] == 'sum2':
pa.parse_files_hist(filelist, pa.func_sum2_trans, **kw)
pa.plot_hist(pa.hists, figsize=(20, 8), figtitle='sum2_val_hist',
xlbl='Sum Value [2]', norm=norm, bins=bins)
elif kw['function'] == 'ttmin':
# pa.hist_trigger_time_min(filelist, **kw)
pa.parse_files_hist(filelist, pa.func_ttmin_trans, **kw)
pa.plot_hist(pa.hists, figsize=(20, 8), figtitle='trg_time_hist',
norm=norm, bins=bins)
elif kw['function'] == 'ttmax':
logger.error('Not Implemented')
elif kw['function'] == 'dtmin':
if kw['trg_ch'] is None:
logging.error('Triggering channel must be specified for dt \
functions. --help for details')
parser.print_usage()
else:
# pa.hist_delta_trigger_time_min(filelist, **kw)
pa.parse_files_hist(filelist, pa.func_dtmin_trans, **kw)
pa.plot_hist(pa.hists, figsize=(20, 8), norm=norm, bins=bins,
figtitle='trg_delta_time_hist')
elif kw['function'] == 'dtmax':
logger.error('Not Implemented')
|
|
from __future__ import absolute_import
from functools import partial
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.decorator import api_key_only_webhook_view, REQ, has_request_variables
from zerver.lib.webhooks.git import get_push_commits_event_message, EMPTY_SHA,\
get_remove_branch_event_message, get_pull_request_event_message,\
get_issue_event_message, SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE,\
get_commits_comment_action_message, get_push_tag_event_message
from zerver.models import Client, UserProfile
from django.http import HttpRequest, HttpResponse
from typing import Dict, Any, Iterable, Optional, Text
class UnknownEventType(Exception):
pass
def get_push_event_body(payload):
# type: (Dict[str, Any]) -> Text
if payload.get('after') == EMPTY_SHA:
return get_remove_branch_event_body(payload)
return get_normal_push_event_body(payload)
def get_normal_push_event_body(payload):
# type: (Dict[str, Any]) -> Text
compare_url = u'{}/compare/{}...{}'.format(
get_repository_homepage(payload),
payload['before'],
payload['after']
)
commits = [
{
'name': commit.get('author').get('name'),
'sha': commit.get('id'),
'message': commit.get('message'),
'url': commit.get('url')
}
for commit in payload.get('commits')
]
return get_push_commits_event_message(
get_user_name(payload),
compare_url,
get_branch_name(payload),
commits
)
def get_remove_branch_event_body(payload):
# type: (Dict[str, Any]) -> Text
return get_remove_branch_event_message(
get_user_name(payload),
get_branch_name(payload)
)
def get_tag_push_event_body(payload):
# type: (Dict[str, Any]) -> Text
return get_push_tag_event_message(
get_user_name(payload),
get_tag_name(payload),
action="pushed" if payload.get('checkout_sha') else "removed"
)
def get_issue_created_event_body(payload):
# type: (Dict[str, Any]) -> Text
return get_issue_event_message(
get_issue_user_name(payload),
'created',
get_object_url(payload),
payload.get('object_attributes').get('iid'),
payload.get('object_attributes').get('description'),
get_objects_assignee(payload)
)
def get_issue_event_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
return get_issue_event_message(
get_issue_user_name(payload),
action,
get_object_url(payload),
payload.get('object_attributes').get('iid'),
)
def get_merge_request_updated_event_body(payload):
# type: (Dict[str, Any]) -> Text
if payload.get('object_attributes').get('oldrev'):
return get_merge_request_event_body(payload, "added commit(s) to")
return get_merge_request_open_or_updated_body(payload, "updated")
def get_merge_request_event_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
pull_request = payload.get('object_attributes')
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
pull_request.get('url'),
pull_request.get('iid'),
type='MR',
)
def get_merge_request_open_or_updated_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
pull_request = payload.get('object_attributes')
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
pull_request.get('url'),
pull_request.get('iid'),
pull_request.get('source_branch'),
pull_request.get('target_branch'),
pull_request.get('description'),
get_objects_assignee(payload),
type='MR',
)
def get_objects_assignee(payload):
# type: (Dict[str, Any]) -> Optional[Text]
assignee_object = payload.get('assignee')
if assignee_object:
return assignee_object.get('name')
return None
def get_commented_commit_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({})'.format(comment['url'])
return get_commits_comment_action_message(
get_issue_user_name(payload),
action,
payload.get('commit').get('url'),
payload.get('commit').get('id'),
comment['note'],
)
def get_commented_merge_request_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({}) on'.format(comment['url'])
url = u'{}/merge_requests/{}'.format(
payload.get('project').get('web_url'),
payload.get('merge_request').get('iid')
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload.get('merge_request').get('iid'),
message=comment['note'],
type='MR'
)
def get_commented_issue_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({}) on'.format(comment['url'])
url = u'{}/issues/{}'.format(
payload.get('project').get('web_url'),
payload.get('issue').get('iid')
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload.get('issue').get('iid'),
message=comment['note'],
type='Issue'
)
def get_commented_snippet_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({}) on'.format(comment['url'])
url = u'{}/snippets/{}'.format(
payload.get('project').get('web_url'),
payload.get('snippet').get('id')
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload.get('snippet').get('id'),
message=comment['note'],
type='Snippet'
)
def get_wiki_page_event_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
return u"{} {} [Wiki Page \"{}\"]({}).".format(
get_issue_user_name(payload),
action,
payload.get('object_attributes').get('title'),
payload.get('object_attributes').get('url'),
)
def get_build_hook_event_body(payload):
# type: (Dict[str, Any]) -> Text
build_status = payload.get('build_status')
if build_status == 'created':
action = 'was created'
elif build_status == 'running':
action = 'started'
else:
action = 'changed status to {}'.format(build_status)
return u"Build {} from {} stage {}.".format(
payload.get('build_name'),
payload.get('build_stage'),
action
)
def get_pipeline_event_body(payload):
# type: (Dict[str, Any]) -> Text
pipeline_status = payload.get('object_attributes').get('status')
if pipeline_status == 'pending':
action = 'was created'
elif pipeline_status == 'running':
action = 'started'
else:
action = 'changed status to {}'.format(pipeline_status)
builds_status = u""
for build in payload.get('builds'):
builds_status += u"* {} - {}\n".format(build.get('name'), build.get('status'))
return u"Pipeline {} with build(s):\n{}.".format(action, builds_status[:-1])
def get_repo_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['project']['name']
def get_user_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['user_name']
def get_issue_user_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['user']['name']
def get_repository_homepage(payload):
# type: (Dict[str, Any]) -> Text
return payload['repository']['homepage']
def get_branch_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['ref'].replace('refs/heads/', '')
def get_tag_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['ref'].replace('refs/tags/', '')
def get_object_iid(payload):
# type: (Dict[str, Any]) -> Text
return payload['object_attributes']['iid']
def get_object_url(payload):
# type: (Dict[str, Any]) -> Text
return payload['object_attributes']['url']
EVENT_FUNCTION_MAPPER = {
'Push Hook': get_push_event_body,
'Tag Push Hook': get_tag_push_event_body,
'Issue Hook open': get_issue_created_event_body,
'Issue Hook close': partial(get_issue_event_body, action='closed'),
'Issue Hook reopen': partial(get_issue_event_body, action='reopened'),
'Issue Hook update': partial(get_issue_event_body, action='updated'),
'Note Hook Commit': get_commented_commit_event_body,
'Note Hook MergeRequest': get_commented_merge_request_event_body,
'Note Hook Issue': get_commented_issue_event_body,
'Note Hook Snippet': get_commented_snippet_event_body,
'Merge Request Hook open': partial(get_merge_request_open_or_updated_body, action='created'),
'Merge Request Hook update': get_merge_request_updated_event_body,
'Merge Request Hook merge': partial(get_merge_request_event_body, action='merged'),
'Merge Request Hook close': partial(get_merge_request_event_body, action='closed'),
'Wiki Page Hook create': partial(get_wiki_page_event_body, action='created'),
'Wiki Page Hook update': partial(get_wiki_page_event_body, action='updated'),
'Build Hook': get_build_hook_event_body,
'Pipeline Hook': get_pipeline_event_body,
}
@api_key_only_webhook_view("Gitlab")
@has_request_variables
def api_gitlab_webhook(request, user_profile, client,
stream=REQ(default='gitlab'),
payload=REQ(argument_type='body'),
branches=REQ(default=None)):
# type: (HttpRequest, UserProfile, Client, Text, Dict[str, Any], Optional[Text]) -> HttpResponse
event = get_event(request, payload, branches)
if event is not None:
body = get_body_based_on_event(event)(payload)
subject = get_subject_based_on_event(event, payload)
check_send_message(user_profile, client, 'stream', [stream], subject, body)
return json_success()
def get_body_based_on_event(event):
# type: (str) -> Any
return EVENT_FUNCTION_MAPPER[event]
def get_subject_based_on_event(event, payload):
# type: (str, Dict[str, Any]) -> Text
if event == 'Push Hook':
return u"{} / {}".format(get_repo_name(payload), get_branch_name(payload))
elif event == 'Build Hook':
return u"{} / {}".format(payload.get('repository').get('name'), get_branch_name(payload))
elif event == 'Pipeline Hook':
return u"{} / {}".format(
get_repo_name(payload),
payload.get('object_attributes').get('ref').replace('refs/heads/', ''))
elif event.startswith('Merge Request Hook'):
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='MR',
id=payload.get('object_attributes').get('iid'),
title=payload.get('object_attributes').get('title')
)
elif event.startswith('Issue Hook'):
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='Issue',
id=payload.get('object_attributes').get('iid'),
title=payload.get('object_attributes').get('title')
)
elif event == 'Note Hook Issue':
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='Issue',
id=payload.get('issue').get('iid'),
title=payload.get('issue').get('title')
)
elif event == 'Note Hook MergeRequest':
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='MR',
id=payload.get('merge_request').get('iid'),
title=payload.get('merge_request').get('title')
)
elif event == 'Note Hook Snippet':
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='Snippet',
id=payload.get('snippet').get('id'),
title=payload.get('snippet').get('title')
)
return get_repo_name(payload)
def get_event(request, payload, branches):
    # type: (HttpRequest, Dict[str, Any], Optional[Text]) -> Optional[str]
event = request.META['HTTP_X_GITLAB_EVENT']
if event == 'Issue Hook':
action = payload.get('object_attributes').get('action')
event = "{} {}".format(event, action)
elif event == 'Note Hook':
action = payload.get('object_attributes').get('noteable_type')
event = "{} {}".format(event, action)
elif event == 'Merge Request Hook':
action = payload.get('object_attributes').get('action')
event = "{} {}".format(event, action)
elif event == 'Wiki Page Hook':
action = payload.get('object_attributes').get('action')
event = "{} {}".format(event, action)
elif event == 'Push Hook':
if branches is not None:
branch = get_branch_name(payload)
if branch not in branches.split(','):
return None
if event in list(EVENT_FUNCTION_MAPPER.keys()):
return event
raise UnknownEventType(u'Event {} is unknown and cannot be handled'.format(event))
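# A hedged, heavily trimmed example payload (hypothetical values) showing the
# dispatch path above: an 'Issue Hook' request whose object_attributes carry
# action == 'open' is mapped by get_event() to the key 'Issue Hook open',
# which EVENT_FUNCTION_MAPPER resolves to get_issue_created_event_body.
_EXAMPLE_ISSUE_OPEN_PAYLOAD = {
    'user': {'name': 'Jane Doe'},
    'project': {'name': 'demo-repo'},
    'object_attributes': {
        'action': 'open',
        'iid': 42,
        'title': 'Example issue',
        'description': 'Example description',
        'url': 'https://gitlab.example.com/demo-repo/issues/42',
    },
}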
|
|
import sys
import time
from django.conf import settings
from django.db.utils import load_backend
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
"""
This class encapsulates all backend-specific differences that pertain to
database *creation*, such as the column types to use for particular Django
Fields, the SQL used to create and destroy tables, and the creation and
destruction of test databases.
"""
data_types = {}
def __init__(self, connection):
self.connection = connection
def _digest(self, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
return '%x' % (abs(hash(args)) % 4294967296L) # 2**32
def sql_create_model(self, model, style, known_models=set()):
"""
Returns the SQL required to create a single model, as a tuple of:
(list_of_sql, pending_references_dict)
"""
opts = model._meta
if not opts.managed or opts.proxy:
return [], {}
final_output = []
table_output = []
pending_references = {}
qn = self.connection.ops.quote_name
for f in opts.local_fields:
col_type = f.db_type(connection=self.connection)
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
# Skip ManyToManyFields, because they're not represented as
# database columns in this table.
continue
# Make the definition (e.g. 'foo VARCHAR(30)') for this field.
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
null = f.null
if (f.empty_strings_allowed and not f.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if not null:
field_output.append(style.SQL_KEYWORD('NOT NULL'))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
elif f.unique:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
if tablespace and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
tablespace_sql = self.connection.ops.tablespace_sql(
tablespace, inline=True)
if tablespace_sql:
field_output.append(tablespace_sql)
if f.rel:
ref_output, pending = self.sql_for_inline_foreign_key_references(
f, known_models, style)
if pending:
pending_references.setdefault(f.rel.to, []).append(
(model, f))
else:
field_output.extend(ref_output)
table_output.append(' '.join(field_output))
for field_constraints in opts.unique_together:
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' %
", ".join(
[style.SQL_FIELD(qn(opts.get_field(f).column))
for f in field_constraints]))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
full_statement.append(
' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
full_statement.append(')')
if opts.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(
opts.db_tablespace)
if tablespace_sql:
full_statement.append(tablespace_sql)
full_statement.append(';')
final_output.append('\n'.join(full_statement))
if opts.has_auto_field:
# Add any extra SQL needed to support auto-incrementing primary
# keys.
auto_column = opts.auto_field.db_column or opts.auto_field.name
autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table,
auto_column)
if autoinc_sql:
for stmt in autoinc_sql:
final_output.append(stmt)
return final_output, pending_references
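    # For illustration (hypothetical model, backend-dependent column types):
    # for a model with an auto primary key and a CharField the method yields
    # SQL shaped like
    #
    #   CREATE TABLE "app_book" (
    #       "id" integer NOT NULL PRIMARY KEY,
    #       "title" varchar(100) NOT NULL
    #   );
    #
    # plus any autoinc_sql() statements the backend needs for the pk.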
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"""
Return the SQL snippet defining the foreign key reference for a field.
"""
qn = self.connection.ops.quote_name
if field.rel.to in known_models:
output = [style.SQL_KEYWORD('REFERENCES') + ' ' +
style.SQL_TABLE(qn(field.rel.to._meta.db_table)) + ' (' +
style.SQL_FIELD(qn(field.rel.to._meta.get_field(
field.rel.field_name).column)) + ')' +
self.connection.ops.deferrable_sql()
]
pending = False
else:
# We haven't yet created the table to which this field
# is related, so save it for later.
output = []
pending = True
return output, pending
def sql_for_pending_references(self, model, style, pending_references):
"""
Returns any ALTER TABLE statements to add constraints after the fact.
"""
from django.db.backends.util import truncate_name
if not model._meta.managed or model._meta.proxy:
return []
qn = self.connection.ops.quote_name
final_output = []
opts = model._meta
if model in pending_references:
for rel_class, f in pending_references[model]:
rel_opts = rel_class._meta
r_table = rel_opts.db_table
r_col = f.column
table = opts.db_table
col = opts.get_field(f.rel.field_name).column
# For MySQL, r_name must be unique in the first 64 characters.
# So we are careful with character usage here.
r_name = '%s_refs_%s_%s' % (
r_col, col, self._digest(r_table, table))
final_output.append(style.SQL_KEYWORD('ALTER TABLE') +
' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
(qn(r_table), qn(truncate_name(
r_name, self.connection.ops.max_name_length())),
qn(r_col), qn(table), qn(col),
self.connection.ops.deferrable_sql()))
del pending_references[model]
return final_output
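    # For illustration (hypothetical table/column names): each pending
    # reference becomes an ALTER TABLE statement of roughly this shape,
    #
    #   ALTER TABLE "app_book" ADD CONSTRAINT "author_id_refs_id_<digest>"
    #       FOREIGN KEY ("author_id") REFERENCES "app_author" ("id")
    #       DEFERRABLE INITIALLY DEFERRED;
    #
    # where the trailing clause comes from connection.ops.deferrable_sql()
    # and may be empty on some backends.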
def sql_indexes_for_model(self, model, style):
"""
Returns the CREATE INDEX SQL statements for a single model.
"""
if not model._meta.managed or model._meta.proxy:
return []
output = []
for f in model._meta.local_fields:
output.extend(self.sql_indexes_for_field(model, f, style))
return output
def sql_indexes_for_field(self, model, f, style):
"""
Return the CREATE INDEX SQL statements for a single model field.
"""
from django.db.backends.util import truncate_name
if f.db_index and not f.unique:
qn = self.connection.ops.quote_name
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
if tablespace_sql:
tablespace_sql = ' ' + tablespace_sql
else:
tablespace_sql = ''
i_name = '%s_%s' % (model._meta.db_table, self._digest(f.column))
output = [style.SQL_KEYWORD('CREATE INDEX') + ' ' +
style.SQL_TABLE(qn(truncate_name(
i_name, self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(model._meta.db_table)) + ' ' +
"(%s)" % style.SQL_FIELD(qn(f.column)) +
"%s;" % tablespace_sql]
else:
output = []
return output
def sql_destroy_model(self, model, references_to_delete, style):
"""
        Return the DROP TABLE and constraint dropping statements for a single
model.
"""
if not model._meta.managed or model._meta.proxy:
return []
# Drop the table now
qn = self.connection.ops.quote_name
output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
style.SQL_TABLE(qn(model._meta.db_table)))]
if model in references_to_delete:
output.extend(self.sql_remove_table_constraints(
model, references_to_delete, style))
if model._meta.has_auto_field:
ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
if ds:
output.append(ds)
return output
def sql_remove_table_constraints(self, model, references_to_delete, style):
from django.db.backends.util import truncate_name
if not model._meta.managed or model._meta.proxy:
return []
output = []
qn = self.connection.ops.quote_name
for rel_class, f in references_to_delete[model]:
table = rel_class._meta.db_table
col = f.column
r_table = model._meta.db_table
r_col = model._meta.get_field(f.rel.field_name).column
r_name = '%s_refs_%s_%s' % (
col, r_col, self._digest(table, r_table))
output.append('%s %s %s %s;' % \
(style.SQL_KEYWORD('ALTER TABLE'),
style.SQL_TABLE(qn(table)),
style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
style.SQL_FIELD(qn(truncate_name(
r_name, self.connection.ops.max_name_length())))))
del references_to_delete[model]
return output
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print("Creating test database for alias '%s'%s..." % (
self.connection.alias, test_db_repr))
self._create_test_db(verbosity, autoclobber)
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
load_initial_data=False)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias)
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
for cache_alias in settings.CACHES:
cache = get_cache(cache_alias)
if isinstance(cache, BaseDatabaseCache):
call_command('createcachetable', cache._table,
database=self.connection.alias)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
self.connection.cursor()
return test_database_name
def _get_test_db_name(self):
"""
Internal implementation - returns the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
or 'TEST_NAME' settings.
"""
if self.connection.settings_dict['TEST_NAME']:
return self.connection.settings_dict['TEST_NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
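    # Example: with settings_dict['NAME'] == 'myproject' and no TEST_NAME
    # set, this returns 'test_myproject' (TEST_DATABASE_PREFIX + NAME); an
    # explicit TEST_NAME takes precedence.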
def _create_test_db(self, verbosity, autoclobber):
"""
Internal implementation - creates the test db tables.
"""
suffix = self.sql_table_creation_suffix()
test_database_name = self._get_test_db_name()
qn = self.connection.ops.quote_name
# Create the test database and connect to it. We need to autocommit
# if the database supports it because PostgreSQL doesn't allow
# CREATE/DROP DATABASE statements within transactions.
cursor = self.connection.cursor()
self._prepare_for_test_db_ddl()
try:
cursor.execute(
"CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
except Exception as e:
sys.stderr.write(
"Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = raw_input(
"Type 'yes' if you would like to try deleting the test "
"database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test database '%s'..."
% self.connection.alias)
cursor.execute(
"DROP DATABASE %s" % qn(test_database_name))
cursor.execute(
"CREATE DATABASE %s %s" % (qn(test_database_name),
suffix))
except Exception as e:
sys.stderr.write(
"Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
return test_database_name
def destroy_test_db(self, old_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists.
"""
self.connection.close()
test_database_name = self.connection.settings_dict['NAME']
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print("Destroying test database for alias '%s'%s..." % (
self.connection.alias, test_db_repr))
# Temporarily use a new connection and a copy of the settings dict.
# This prevents the production database from being exposed to potential
# child threads while (or after) the test database is destroyed.
# Refs #10868 and #17786.
settings_dict = self.connection.settings_dict.copy()
settings_dict['NAME'] = old_database_name
backend = load_backend(settings_dict['ENGINE'])
new_connection = backend.DatabaseWrapper(
settings_dict,
alias='__destroy_test_db__',
allow_thread_sharing=False)
new_connection.creation._destroy_test_db(test_database_name, verbosity)
def _destroy_test_db(self, test_database_name, verbosity):
"""
Internal implementation - remove the test db tables.
"""
# Remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
cursor = self.connection.cursor()
self._prepare_for_test_db_ddl()
# Wait to avoid "database is being accessed by other users" errors.
time.sleep(1)
cursor.execute("DROP DATABASE %s"
% self.connection.ops.quote_name(test_database_name))
self.connection.close()
def set_autocommit(self):
"""
Make sure a connection is in autocommit mode. - Deprecated, not used
anymore by Django code. Kept for compatibility with user code that
might use it.
"""
pass
def _prepare_for_test_db_ddl(self):
"""
Internal implementation - Hook for tasks that should be performed
before the ``CREATE DATABASE``/``DROP DATABASE`` clauses used by
        testing code to create/destroy test databases. Needed e.g. in
PostgreSQL to rollback and close any active transaction.
"""
pass
def sql_table_creation_suffix(self):
"""
SQL to append to the end of the test table creation statements.
"""
return ''
def test_db_signature(self):
"""
Returns a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
        according to the RDBMS particularities.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME']
)
|
|
"""
pygments.lexers.crystal
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Crystal.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import ExtendedRegexLexer, include, \
bygroups, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Whitespace
__all__ = ['CrystalLexer']
line_re = re.compile('.*?\n')
CRYSTAL_OPERATORS = [
'!=', '!~', '!', '%', '&&', '&', '**', '*', '+', '-', '/', '<=>', '<<', '<=', '<',
'===', '==', '=~', '=', '>=', '>>', '>', '[]=', '[]?', '[]', '^', '||', '|', '~'
]
class CrystalLexer(ExtendedRegexLexer):
"""
For `Crystal <http://crystal-lang.org>`_ source code.
.. versionadded:: 2.2
"""
name = 'Crystal'
aliases = ['cr', 'crystal']
filenames = ['*.cr']
mimetypes = ['text/x-crystal']
flags = re.DOTALL | re.MULTILINE
def heredoc_callback(self, match, ctx):
# okay, this is the hardest part of parsing Crystal...
# match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
start = match.start(1)
yield start, Operator, match.group(1) # <<-?
yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
yield match.start(3), String.Delimiter, match.group(3) # heredoc name
yield match.start(4), String.Heredoc, match.group(4) # quote again
heredocstack = ctx.__dict__.setdefault('heredocstack', [])
outermost = not bool(heredocstack)
heredocstack.append((match.group(1) == '<<-', match.group(3)))
ctx.pos = match.start(5)
ctx.end = match.end(5)
# this may find other heredocs, so limit the recursion depth
if len(heredocstack) < 100:
yield from self.get_tokens_unprocessed(context=ctx)
else:
yield ctx.pos, String.Heredoc, match.group(5)
ctx.pos = match.end()
if outermost:
# this is the outer heredoc again, now we can process them all
for tolerant, hdname in heredocstack:
lines = []
for match in line_re.finditer(ctx.text, ctx.pos):
if tolerant:
check = match.group().strip()
else:
check = match.group().rstrip()
if check == hdname:
for amatch in lines:
yield amatch.start(), String.Heredoc, amatch.group()
yield match.start(), String.Delimiter, match.group()
ctx.pos = match.end()
break
else:
lines.append(match)
else:
# end of heredoc not found -- error!
for amatch in lines:
yield amatch.start(), Error, amatch.group()
ctx.end = len(ctx.text)
del heredocstack[:]
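    # A hedged illustration of what heredoc_callback handles, e.g. Crystal
    # source such as
    #
    #   text = <<-GREETING
    #     Hello, #{name}!
    #     GREETING
    #
    # The 'root' regex captures '<<-', the optional quote, the delimiter
    # name and the rest of the line; because '<<-' was used, terminator
    # lines are compared with strip(), so an indented GREETING still closes
    # the heredoc.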
def gen_crystalstrings_rules():
states = {}
states['strings'] = [
(r'\:\w+[!?]?', String.Symbol),
(words(CRYSTAL_OPERATORS, prefix=r'\:'), String.Symbol),
(r":'(\\\\|\\[^\\]|[^'\\])*'", String.Symbol),
# This allows arbitrary text after '\ for simplicity
(r"'(\\\\|\\'|[^']|\\[^'\\]+)'", String.Char),
(r':"', String.Symbol, 'simple-sym'),
# Crystal doesn't have "symbol:"s but this simplifies function args
(r'([a-zA-Z_]\w*)(:)(?!:)', bygroups(String.Symbol, Punctuation)),
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
]
# double-quoted string and symbol
for name, ttype, end in ('string', String.Double, '"'), \
('sym', String.Symbol, '"'), \
('backtick', String.Backtick, '`'):
states['simple-'+name] = [
include('string-escaped' if name == 'sym' else 'string-intp-escaped'),
(r'[^\\%s#]+' % end, ttype),
(r'[\\#]', ttype),
(end, ttype, '#pop'),
]
# https://crystal-lang.org/docs/syntax_and_semantics/literals/string.html#percent-string-literals
for lbrace, rbrace, bracecc, name in \
('\\{', '\\}', '{}', 'cb'), \
('\\[', '\\]', '\\[\\]', 'sb'), \
('\\(', '\\)', '()', 'pa'), \
('<', '>', '<>', 'ab'), \
('\\|', '\\|', '\\|', 'pi'):
states[name+'-intp-string'] = [
(r'\\' + lbrace, String.Other),
] + (lbrace != rbrace) * [
(lbrace, String.Other, '#push'),
] + [
(rbrace, String.Other, '#pop'),
include('string-intp-escaped'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
states['strings'].append((r'%Q?' + lbrace, String.Other,
name+'-intp-string'))
states[name+'-string'] = [
(r'\\[\\' + bracecc + ']', String.Other),
] + (lbrace != rbrace) * [
(lbrace, String.Other, '#push'),
] + [
(rbrace, String.Other, '#pop'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
# https://crystal-lang.org/docs/syntax_and_semantics/literals/array.html#percent-array-literals
states['strings'].append((r'%[qwi]' + lbrace, String.Other,
name+'-string'))
states[name+'-regex'] = [
(r'\\[\\' + bracecc + ']', String.Regex),
] + (lbrace != rbrace) * [
(lbrace, String.Regex, '#push'),
] + [
(rbrace + '[imsx]*', String.Regex, '#pop'),
include('string-intp'),
(r'[\\#' + bracecc + ']', String.Regex),
(r'[^\\#' + bracecc + ']+', String.Regex),
]
states['strings'].append((r'%r' + lbrace, String.Regex,
name+'-regex'))
return states
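    # The generated states cover Crystal's percent literals, e.g.
    # (illustrative snippets) %(interpolated #{x}), %q(raw), %w(a b c),
    # %i(sym1 sym2) and %r{\d+}imsx, with one state per bracket pair so that
    # nested matching brackets inside a literal are tracked via '#push' /
    # '#pop'.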
tokens = {
'root': [
(r'#.*?$', Comment.Single),
# keywords
(words('''
abstract asm begin break case do else elsif end ensure extend if in
include next of private protected require rescue return select self super
then unless until when while with yield
'''.split(), suffix=r'\b'), Keyword),
(words('''
previous_def forall out uninitialized __DIR__ __FILE__ __LINE__
__END_LINE__
'''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Keyword.Pseudo),
# https://crystal-lang.org/docs/syntax_and_semantics/is_a.html
(r'\.(is_a\?|nil\?|responds_to\?|as\?|as\b)', Keyword.Pseudo),
(words(['true', 'false', 'nil'], suffix=r'\b'), Keyword.Constant),
# start of function, class and module names
(r'(module|lib)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)',
bygroups(Keyword, Whitespace, Name.Namespace)),
(r'(def|fun|macro)(\s+)((?:[a-zA-Z_]\w*::)*)',
bygroups(Keyword, Whitespace, Name.Namespace), 'funcname'),
(r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
(r'(annotation|class|struct|union|type|alias|enum)(\s+)((?:[a-zA-Z_]\w*::)*)',
bygroups(Keyword, Whitespace, Name.Namespace), 'classname'),
# https://crystal-lang.org/api/toplevel.html
(words('''
instance_sizeof offsetof pointerof sizeof typeof
'''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Keyword.Pseudo),
# macros
(r'(?<!\.)(debugger\b|p!|pp!|record\b|spawn\b)', Name.Builtin.Pseudo),
# builtins
(words('''
abort at_exit caller exit gets loop main p pp print printf puts
raise rand read_line sleep spawn sprintf system
'''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin),
# https://crystal-lang.org/api/Object.html#macro-summary
(r'(?<!\.)(((class_)?((getter|property)\b[!?]?|setter\b))|'
r'(def_(clone|equals|equals_and_hash|hash)|delegate|forward_missing_to)\b)',
Name.Builtin.Pseudo),
# normal heredocs
(r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
heredoc_callback),
# empty string heredocs
(r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
(r'__END__', Comment.Preproc, 'end-part'),
# multiline regex (after keywords or assignments)
(r'(?:^|(?<=[=<>~!:])|'
r'(?<=(?:\s|;)when\s)|'
r'(?<=(?:\s|;)or\s)|'
r'(?<=(?:\s|;)and\s)|'
r'(?<=\.index\s)|'
r'(?<=\.scan\s)|'
r'(?<=\.sub\s)|'
r'(?<=\.sub!\s)|'
r'(?<=\.gsub\s)|'
r'(?<=\.gsub!\s)|'
r'(?<=\.match\s)|'
r'(?<=(?:\s|;)if\s)|'
r'(?<=(?:\s|;)elsif\s)|'
r'(?<=^when\s)|'
r'(?<=^index\s)|'
r'(?<=^scan\s)|'
r'(?<=^sub\s)|'
r'(?<=^gsub\s)|'
r'(?<=^sub!\s)|'
r'(?<=^gsub!\s)|'
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
r')(\s*)(/)', bygroups(Whitespace, String.Regex), 'multiline-regex'),
# multiline regex (in method calls or subscripts)
(r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
(r'(\s+)(/)(?![\s=])', bygroups(Whitespace, String.Regex),
'multiline-regex'),
# lex numbers and ignore following regular expressions which
# are division operators in fact (grrrr. i hate that. any
# better ideas?)
# since pygments 0.7 we also eat a "?" operator after numbers
# so that the char operator does not work. Chars are not allowed
# there so that you can use the ternary operator.
# stupid example:
# x>=0?n[x]:""
(r'(0o[0-7]+(?:_[0-7]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Oct, Whitespace, Operator)),
(r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Hex, Whitespace, Operator)),
(r'(0b[01]+(?:_[01]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Bin, Whitespace, Operator)),
# 3 separate expressions for floats because any of the 3 optional
# parts makes it a float
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)(?:e[+-]?[0-9]+)?'
r'(?:_?f[0-9]+)?)(\s*)([/?])?',
bygroups(Number.Float, Whitespace, Operator)),
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)'
r'(?:_?f[0-9]+)?)(\s*)([/?])?',
bygroups(Number.Float, Whitespace, Operator)),
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)?'
r'(?:_?f[0-9]+))(\s*)([/?])?',
bygroups(Number.Float, Whitespace, Operator)),
(r'(0\b|[1-9][\d]*(?:_\d+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Integer, Whitespace, Operator)),
# Names
(r'@@[a-zA-Z_]\w*', Name.Variable.Class),
(r'@[a-zA-Z_]\w*', Name.Variable.Instance),
(r'\$\w+', Name.Variable.Global),
(r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
(r'\$-[0adFiIlpvw]', Name.Variable.Global),
(r'::', Operator),
include('strings'),
# https://crystal-lang.org/reference/syntax_and_semantics/literals/char.html
(r'\?(\\[MC]-)*' # modifiers
r'(\\([\\abefnrtv#"\']|[0-7]{1,3}|x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|u\{[a-fA-F0-9 ]+\})|\S)'
r'(?!\w)',
String.Char),
(r'[A-Z][A-Z_]+\b(?!::|\.)', Name.Constant),
# macro expansion
(r'\{%', String.Interpol, 'in-macro-control'),
(r'\{\{', String.Interpol, 'in-macro-expr'),
# annotations
(r'(@\[)(\s*)([A-Z]\w*(::[A-Z]\w*)*)',
bygroups(Operator, Whitespace, Name.Decorator), 'in-annot'),
# this is needed because Crystal attributes can look
# like keywords (class) or like this: ` ?!?
(words(CRYSTAL_OPERATORS, prefix=r'(\.|::)'),
bygroups(Operator, Name.Operator)),
(r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
bygroups(Operator, Name)),
# Names can end with [!?] unless it's "!="
(r'[a-zA-Z_]\w*(?:[!?](?!=))?', Name),
(r'(\[|\]\??|\*\*|<=>?|>=|<<?|>>?|=~|===|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&!^|~]=?', Operator),
(r'[(){};,/?:\\]', Punctuation),
(r'\s+', Whitespace)
],
'funcname': [
(r'(?:([a-zA-Z_]\w*)(\.))?'
r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
bygroups(Name.Class, Operator, Name.Function), '#pop'),
default('#pop')
],
'classname': [
(r'[A-Z_]\w*', Name.Class),
(r'(\()(\s*)([A-Z_]\w*)(\s*)(\))',
bygroups(Punctuation, Whitespace, Name.Class, Whitespace, Punctuation)),
default('#pop')
],
'in-intp': [
(r'\{', String.Interpol, '#push'),
(r'\}', String.Interpol, '#pop'),
include('root'),
],
'string-intp': [
(r'#\{', String.Interpol, 'in-intp'),
],
'string-escaped': [
# https://crystal-lang.org/reference/syntax_and_semantics/literals/string.html
(r'\\([\\abefnrtv#"\']|[0-7]{1,3}|x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|u\{[a-fA-F0-9 ]+\})',
String.Escape)
],
'string-intp-escaped': [
include('string-intp'),
include('string-escaped'),
],
'interpolated-regex': [
include('string-intp'),
(r'[\\#]', String.Regex),
(r'[^\\#]+', String.Regex),
],
'interpolated-string': [
include('string-intp'),
(r'[\\#]', String.Other),
(r'[^\\#]+', String.Other),
],
'multiline-regex': [
include('string-intp'),
(r'\\\\', String.Regex),
(r'\\/', String.Regex),
(r'[\\#]', String.Regex),
(r'[^\\/#]+', String.Regex),
(r'/[imsx]*', String.Regex, '#pop'),
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
],
'in-macro-control': [
(r'\{%', String.Interpol, '#push'),
(r'%\}', String.Interpol, '#pop'),
(r'(for|verbatim)\b', Keyword),
include('root'),
],
'in-macro-expr': [
(r'\{\{', String.Interpol, '#push'),
(r'\}\}', String.Interpol, '#pop'),
include('root'),
],
'in-annot': [
(r'\[', Operator, '#push'),
(r'\]', Operator, '#pop'),
include('root'),
],
}
tokens.update(gen_crystalstrings_rules())
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .base import Type
class Epub(Type):
"""
Implements the EPUB archive type matcher.
"""
MIME = 'application/epub+zip'
EXTENSION = 'epub'
def __init__(self):
super(Epub, self).__init__(
mime=Epub.MIME,
extension=Epub.EXTENSION
)
def match(self, buf):
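        # EPUB files are ZIP archives: a local file header ("PK\x03\x04")
        # followed by the contiguous string "mimetypeapplication/epub+zip"
        # starting at offset 30 (the stored "mimetype" entry and its content).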
return (len(buf) > 57 and
buf[0] == 0x50 and buf[1] == 0x4B and
buf[2] == 0x3 and buf[3] == 0x4 and
buf[30] == 0x6D and buf[31] == 0x69 and
buf[32] == 0x6D and buf[33] == 0x65 and
buf[34] == 0x74 and buf[35] == 0x79 and
buf[36] == 0x70 and buf[37] == 0x65 and
buf[38] == 0x61 and buf[39] == 0x70 and
buf[40] == 0x70 and buf[41] == 0x6C and
buf[42] == 0x69 and buf[43] == 0x63 and
buf[44] == 0x61 and buf[45] == 0x74 and
buf[46] == 0x69 and buf[47] == 0x6F and
buf[48] == 0x6E and buf[49] == 0x2F and
buf[50] == 0x65 and buf[51] == 0x70 and
buf[52] == 0x75 and buf[53] == 0x62 and
buf[54] == 0x2B and buf[55] == 0x7A and
buf[56] == 0x69 and buf[57] == 0x70)
class Zip(Type):
"""
Implements the Zip archive type matcher.
"""
MIME = 'application/zip'
EXTENSION = 'zip'
def __init__(self):
super(Zip, self).__init__(
mime=Zip.MIME,
extension=Zip.EXTENSION
)
def match(self, buf):
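        # "PK" followed by a local file header marker (\x03\x04), an
        # end-of-central-directory record (\x05\x06, empty archive) or a
        # spanned-archive marker (\x07\x08).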
return (len(buf) > 3 and
buf[0] == 0x50 and buf[1] == 0x4B and
(buf[2] == 0x3 or buf[2] == 0x5 or
buf[2] == 0x7) and
(buf[3] == 0x4 or buf[3] == 0x6 or
buf[3] == 0x8))
class Tar(Type):
"""
Implements the Tar archive type matcher.
"""
MIME = 'application/x-tar'
EXTENSION = 'tar'
def __init__(self):
super(Tar, self).__init__(
mime=Tar.MIME,
extension=Tar.EXTENSION
)
def match(self, buf):
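        # POSIX tar archives carry the "ustar" magic at offset 257.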
return (len(buf) > 261 and
buf[257] == 0x75 and
buf[258] == 0x73 and
buf[259] == 0x74 and
buf[260] == 0x61 and
buf[261] == 0x72)
class Rar(Type):
"""
Implements the RAR archive type matcher.
"""
MIME = 'application/x-rar-compressed'
EXTENSION = 'rar'
def __init__(self):
super(Rar, self).__init__(
mime=Rar.MIME,
extension=Rar.EXTENSION
)
def match(self, buf):
return (len(buf) > 6 and
buf[0] == 0x52 and
buf[1] == 0x61 and
buf[2] == 0x72 and
buf[3] == 0x21 and
buf[4] == 0x1A and
buf[5] == 0x7 and
(buf[6] == 0x0 or
buf[6] == 0x1))
class Gz(Type):
"""
Implements the GZ archive type matcher.
"""
MIME = 'application/gzip'
EXTENSION = 'gz'
def __init__(self):
super(Gz, self).__init__(
mime=Gz.MIME,
extension=Gz.EXTENSION
)
def match(self, buf):
return (len(buf) > 2 and
buf[0] == 0x1F and
buf[1] == 0x8B and
buf[2] == 0x8)
class Bz2(Type):
"""
Implements the BZ2 archive type matcher.
"""
MIME = 'application/x-bzip2'
EXTENSION = 'bz2'
def __init__(self):
super(Bz2, self).__init__(
mime=Bz2.MIME,
extension=Bz2.EXTENSION
)
def match(self, buf):
return (len(buf) > 2 and
buf[0] == 0x42 and
buf[1] == 0x5A and
buf[2] == 0x68)
class SevenZ(Type):
"""
Implements the SevenZ (7z) archive type matcher.
"""
MIME = 'application/x-7z-compressed'
EXTENSION = '7z'
def __init__(self):
super(SevenZ, self).__init__(
mime=SevenZ.MIME,
extension=SevenZ.EXTENSION
)
def match(self, buf):
return (len(buf) > 5 and
buf[0] == 0x37 and
buf[1] == 0x7A and
buf[2] == 0xBC and
buf[3] == 0xAF and
buf[4] == 0x27 and
buf[5] == 0x1C)
class Pdf(Type):
"""
Implements the PDF archive type matcher.
"""
MIME = 'application/pdf'
EXTENSION = 'pdf'
def __init__(self):
super(Pdf, self).__init__(
mime=Pdf.MIME,
extension=Pdf.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x25 and
buf[1] == 0x50 and
buf[2] == 0x44 and
buf[3] == 0x46)
class Exe(Type):
"""
Implements the EXE archive type matcher.
"""
MIME = 'application/x-msdownload'
EXTENSION = 'exe'
def __init__(self):
super(Exe, self).__init__(
mime=Exe.MIME,
extension=Exe.EXTENSION
)
def match(self, buf):
return (len(buf) > 1 and
buf[0] == 0x4D and
buf[1] == 0x5A)
class Swf(Type):
"""
Implements the SWF archive type matcher.
"""
MIME = 'application/x-shockwave-flash'
EXTENSION = 'swf'
def __init__(self):
super(Swf, self).__init__(
mime=Swf.MIME,
extension=Swf.EXTENSION
)
def match(self, buf):
return (len(buf) > 2 and
(buf[0] == 0x43 or
buf[0] == 0x46) and
buf[1] == 0x57 and
buf[2] == 0x53)
class Rtf(Type):
"""
Implements the RTF archive type matcher.
"""
MIME = 'application/rtf'
EXTENSION = 'rtf'
def __init__(self):
super(Rtf, self).__init__(
mime=Rtf.MIME,
extension=Rtf.EXTENSION
)
def match(self, buf):
return (len(buf) > 4 and
buf[0] == 0x7B and
buf[1] == 0x5C and
buf[2] == 0x72 and
buf[3] == 0x74 and
buf[4] == 0x66)
class Nes(Type):
"""
Implements the NES archive type matcher.
"""
MIME = 'application/x-nintendo-nes-rom'
EXTENSION = 'nes'
def __init__(self):
super(Nes, self).__init__(
mime=Nes.MIME,
extension=Nes.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x4E and
buf[1] == 0x45 and
buf[2] == 0x53 and
buf[3] == 0x1A)
class Crx(Type):
"""
Implements the CRX archive type matcher.
"""
MIME = 'application/x-google-chrome-extension'
EXTENSION = 'crx'
def __init__(self):
super(Crx, self).__init__(
mime=Crx.MIME,
extension=Crx.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x43 and
buf[1] == 0x72 and
buf[2] == 0x32 and
buf[3] == 0x34)
class Cab(Type):
"""
Implements the CAB archive type matcher.
"""
MIME = 'application/vnd.ms-cab-compressed'
EXTENSION = 'cab'
def __init__(self):
super(Cab, self).__init__(
mime=Cab.MIME,
extension=Cab.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
((buf[0] == 0x4D and
buf[1] == 0x53 and
buf[2] == 0x43 and
buf[3] == 0x46) or
(buf[0] == 0x49 and
buf[1] == 0x53 and
buf[2] == 0x63 and
buf[3] == 0x28)))
class Eot(Type):
"""
Implements the EOT archive type matcher.
"""
MIME = 'application/octet-stream'
EXTENSION = 'eot'
def __init__(self):
super(Eot, self).__init__(
mime=Eot.MIME,
extension=Eot.EXTENSION
)
def match(self, buf):
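        # Embedded OpenType: "LP" magic at bytes 34-35 plus one of the known
        # version values encoded in bytes 8-10.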
return (len(buf) > 35 and
buf[34] == 0x4C and
buf[35] == 0x50 and
((buf[8] == 0x02 and
buf[9] == 0x00 and
buf[10] == 0x01) or
(buf[8] == 0x01 and
buf[9] == 0x00 and
buf[10] == 0x00) or
(buf[8] == 0x02 and
buf[9] == 0x00 and
buf[10] == 0x02)))
class Ps(Type):
"""
Implements the PS archive type matcher.
"""
MIME = 'application/postscript'
EXTENSION = 'ps'
def __init__(self):
super(Ps, self).__init__(
mime=Ps.MIME,
extension=Ps.EXTENSION
)
def match(self, buf):
return (len(buf) > 1 and
buf[0] == 0x25 and
buf[1] == 0x21)
class Xz(Type):
"""
    Implements the Xz archive type matcher.
"""
MIME = 'application/x-xz'
EXTENSION = 'xz'
def __init__(self):
super(Xz, self).__init__(
mime=Xz.MIME,
extension=Xz.EXTENSION
)
def match(self, buf):
return (len(buf) > 5 and
buf[0] == 0xFD and
buf[1] == 0x37 and
buf[2] == 0x7A and
buf[3] == 0x58 and
buf[4] == 0x5A and
buf[5] == 0x00)
class Sqlite(Type):
"""
Implements the Sqlite DB archive type matcher.
"""
MIME = 'application/x-sqlite3'
EXTENSION = 'sqlite'
def __init__(self):
super(Sqlite, self).__init__(
mime=Sqlite.MIME,
extension=Sqlite.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x53 and
buf[1] == 0x51 and
buf[2] == 0x4C and
buf[3] == 0x69)
class Deb(Type):
"""
Implements the DEB archive type matcher.
"""
MIME = 'application/x-deb'
EXTENSION = 'deb'
def __init__(self):
super(Deb, self).__init__(
mime=Deb.MIME,
extension=Deb.EXTENSION
)
def match(self, buf):
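        # Debian packages are ar archives that start with "!<arch>\n"
        # followed by the "debian-binary" member name.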
return (len(buf) > 20 and
buf[0] == 0x21 and
buf[1] == 0x3C and
buf[2] == 0x61 and
buf[3] == 0x72 and
buf[4] == 0x63 and
buf[5] == 0x68 and
buf[6] == 0x3E and
buf[7] == 0x0A and
buf[8] == 0x64 and
buf[9] == 0x65 and
buf[10] == 0x62 and
buf[11] == 0x69 and
buf[12] == 0x61 and
buf[13] == 0x6E and
buf[14] == 0x2D and
buf[15] == 0x62 and
buf[16] == 0x69 and
buf[17] == 0x6E and
buf[18] == 0x61 and
buf[19] == 0x72 and
buf[20] == 0x79)
class Ar(Type):
"""
Implements the AR archive type matcher.
"""
MIME = 'application/x-unix-archive'
EXTENSION = 'ar'
def __init__(self):
super(Ar, self).__init__(
mime=Ar.MIME,
extension=Ar.EXTENSION
)
def match(self, buf):
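        # Common ar archive header: "!<arch>".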
return (len(buf) > 6 and
buf[0] == 0x21 and
buf[1] == 0x3C and
buf[2] == 0x61 and
buf[3] == 0x72 and
buf[4] == 0x63 and
buf[5] == 0x68 and
buf[6] == 0x3E)
class Z(Type):
"""
Implements the Z archive type matcher.
"""
MIME = 'application/x-compress'
EXTENSION = 'Z'
def __init__(self):
super(Z, self).__init__(
mime=Z.MIME,
extension=Z.EXTENSION
)
def match(self, buf):
return (len(buf) > 1 and
((buf[0] == 0x1F and
buf[1] == 0xA0) or
(buf[0] == 0x1F and
buf[1] == 0x9D)))
class Lzop(Type):
"""
Implements the Lzop archive type matcher.
"""
MIME = 'application/x-lzop'
EXTENSION = 'lzo'
def __init__(self):
super(Lzop, self).__init__(
mime=Lzop.MIME,
extension=Lzop.EXTENSION
)
def match(self, buf):
return (len(buf) > 7 and
buf[0] == 0x89 and
buf[1] == 0x4C and
buf[2] == 0x5A and
buf[3] == 0x4F and
buf[4] == 0x00 and
buf[5] == 0x0D and
buf[6] == 0x0A and
buf[7] == 0x1A)
class Lz(Type):
"""
Implements the Lz archive type matcher.
"""
MIME = 'application/x-lzip'
EXTENSION = 'lz'
def __init__(self):
super(Lz, self).__init__(
mime=Lz.MIME,
extension=Lz.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x4C and
buf[1] == 0x5A and
buf[2] == 0x49 and
buf[3] == 0x50)
class Elf(Type):
"""
Implements the Elf archive type matcher
"""
MIME = 'application/x-executable'
EXTENSION = 'elf'
def __init__(self):
super(Elf, self).__init__(
mime=Elf.MIME,
extension=Elf.EXTENSION
)
def match(self, buf):
return (len(buf) > 52 and
buf[0] == 0x7F and
buf[1] == 0x45 and
buf[2] == 0x4C and
buf[3] == 0x46)
class Lz4(Type):
"""
Implements the Lz4 archive type matcher.
"""
MIME = 'application/x-lz4'
EXTENSION = 'lz4'
def __init__(self):
super(Lz4, self).__init__(
mime=Lz4.MIME,
extension=Lz4.EXTENSION
)
def match(self, buf):
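        # LZ4 frame magic number 0x184D2204, stored little-endian.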
return (len(buf) > 3 and
buf[0] == 0x04 and
buf[1] == 0x22 and
buf[2] == 0x4D and
buf[3] == 0x18)
class Br(Type):
"""Implements the Br image type matcher."""
MIME = 'application/x-brotli'
EXTENSION = 'br'
def __init__(self):
super(Br, self).__init__(
mime=Br.MIME,
extension=Br.EXTENSION
)
def match(self, buf):
return buf[:4] == bytearray([0xce, 0xb2, 0xcf, 0x81])
class Dcm(Type):
"""Implements the Dcm image type matcher."""
MIME = 'application/dicom'
EXTENSION = 'dcm'
def __init__(self):
super(Dcm, self).__init__(
mime=Dcm.MIME,
extension=Dcm.EXTENSION
)
def match(self, buf):
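        # "DICM" marker located right after the 128-byte DICOM preamble.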
        return buf[128:132] == bytearray([0x44, 0x49, 0x43, 0x4d])
class Rpm(Type):
"""Implements the Rpm image type matcher."""
MIME = 'application/x-rpm'
EXTENSION = 'rpm'
def __init__(self):
super(Rpm, self).__init__(
mime=Rpm.MIME,
extension=Rpm.EXTENSION
)
def match(self, buf):
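        # RPM lead magic: 0xED 0xAB 0xEE 0xDB.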
return buf[:4] == bytearray([0xed, 0xab, 0xee, 0xdb])
|
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .... import core
from ....framework import IrGraph
from ....framework import IrNode
__all__ = ['TransformForMkldnnPass']
class TransformForMkldnnPass(object):
"""
Convert QuantizationFreezePass generated IrGraph to MKL-DNN supported INT8
    IrGraph. The following transformations are done in this pass:
    1. Convert the int8-range weights (stored as float32) that are generated by
       the QuantizationFreezePass back to float32-range weights by applying the
       corresponding scales. This conversion is needed because the MKL-DNN INT8
       conv2d and mul kernels currently only support float32 weight input, so
       weight quantization happens inside the INT8 conv2d and mul kernels.
    2. Create a new conv2d or mul op with the converted weights, link its output
       to the fake_dequantize_abs_max op's output, and set the conv2d attribute
       "force_fp32_output" to true.
    3. Transform fake_quantize_xx ops to quantize ops.
    4. Remove fake_dequantize_abs_max ops.
"""
def __init__(self, scope=None, place=None):
"""
Args:
scope(fluid.Scope): scope is used to initialize the new parameters.
place(fluid.CPUPlace): place is used to initialize the new parameters.
Examples:
.. code-block:: python
            # The original graph will be rewritten.
import paddle.fluid as fluid
from paddle.fluid.contrib.slim.quantization \
import TransformForMkldnnPass
from paddle.fluid.framework import IrGraph
from paddle.fluid import core
graph = IrGraph(core.Graph(fluid.Program().desc), for_test=False)
place = fluid.CPUPlace()
mkldnn_pass = TransformForMkldnnPass(fluid.global_scope(),
place)
mkldnn_pass.apply(graph)
"""
self._scope = scope
self._place = place
self.quantize_type = [
'fake_quantize_moving_average_abs_max',
'fake_quantize_range_abs_max'
]
self.dequantize_type = ['fake_dequantize_max_abs']
self._quantizable_ops = ['conv2d', 'depthwise_conv2d', 'mul']
self._conv_ops = ['conv2d', 'depthwise_conv2d']
self.InScale = {}
self.max_range = {}
self.new_output = {}
self.s8_max = 127
def apply(self, graph):
"""
        Quantize the graph for running MKL-DNN INT8 inference. According
        to the activation quantization type, the fake quantize ops in the
        graph are transformed into quantize ops and the fake dequantize
        ops are removed.
Args:
graph(IrGraph): the applied graph.
"""
assert isinstance(graph,
IrGraph), 'graph must be the instance of IrGraph.'
ops = graph.all_op_nodes()
persistable_vars = [p.name() for p in graph.all_persistable_nodes()]
# Collect the InScales and max_range to calculate the new scales for MKL-DNN
# INT8 conv2d and mul
for op_node in ops:
if op_node.name() in self.dequantize_type:
input_name = op_node.input("X")[0]
scale_name = op_node.input("Scale")[0]
self.InScale[input_name] = self._load_param(self._scope,
scale_name)[0]
self.max_range[input_name] = op_node.op().attr("max_range")
self.new_output[input_name] = op_node.output("Out")[0]
for op_node in ops:
if op_node.name() in self._quantizable_ops:
if op_node.name() in self._conv_ops:
self._transform_to_conv_mkldnn(graph, op_node)
else:
self._transform_to_mul_mkldnn(graph, op_node)
elif op_node.name() in self.quantize_type:
self._transform_to_quantize_mkldnn(graph, op_node)
elif op_node.name() in self.dequantize_type:
self._remove_fake_dequantize_op(graph, op_node)
self._remove_unused_var_nodes(graph)
return graph
def _transform_to_conv_mkldnn(self, graph, op_node):
weight_name = op_node.input("Filter")[0]
output_name = op_node.output("Output")[0]
# Convert int8 range weights to fp32 range weights
weight = self._load_param(self._scope, weight_name)
w_fp32 = np.divide(
np.multiply(weight, self.s8_max), self.max_range[output_name])
w_fp32 = w_fp32.reshape(weight.shape)
self._restore_var(weight_name, w_fp32)
input_var_node = graph._find_node_by_name(op_node.inputs,
op_node.input("Input")[0])
weight_var_node = graph._find_node_by_name(op_node.inputs, weight_name)
# Set fake_dequantize_abs_max's output as new output of conv2d
output_var_node = graph._find_node_by_name(graph.all_var_nodes(),
self.new_output[output_name])
attrs = {
name: op_node.op().attr(name)
for name in op_node.op().attr_names()
}
conv_op_node = graph.create_op_node(
op_type='conv2d',
attrs=attrs,
inputs={'Input': input_var_node,
'Filter': weight_var_node},
outputs={'Output': output_var_node})
        # Use the QAT scales to compute the scales for the MKL-DNN INT8 conv2d
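        # Illustrative arithmetic with hypothetical numbers: if InScale[output_name]
        # were 6.35 and max_range[output_name] were 2540.0, then Scale_in would be
        # 127 / 6.35 = 20.0 and Scale_weights would be [2540.0 / 127] = [20.0].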
scale_in = self.s8_max / self.InScale[output_name]
        scale_w = [self.max_range[output_name] / self.s8_max]
conv_op_node.set_attr("Scale_weights", scale_w)
conv_op_node.set_attr("Scale_in", scale_in)
conv_op_node.set_attr("Scale_out", 1.0)
conv_op_node.set_attr("use_mkldnn", 1)
conv_op_node.set_attr("force_fp32_output", 1)
graph.link_to(input_var_node, conv_op_node)
graph.link_to(weight_var_node, conv_op_node)
graph.link_to(conv_op_node, output_var_node)
graph.safe_remove_nodes(op_node)
def _transform_to_mul_mkldnn(self, graph, op_node):
# For MKL-DNN INT8 mul, input Y should be the weights
weight_name = op_node.input("Y")[0]
output_name = op_node.output("Out")[0]
# Convert int8 range weights to fp32 range weights
weight = self._load_param(self._scope, weight_name)
w_fp32 = np.divide(
np.multiply(weight, self.s8_max), self.max_range[output_name])
w_fp32 = w_fp32.reshape(weight.shape)
self._restore_var(weight_name, w_fp32)
input_var_node = graph._find_node_by_name(op_node.inputs,
op_node.input("X")[0])
weight_var_node = graph._find_node_by_name(op_node.inputs, weight_name)
# Set fake_dequantize_abs_max's output as new output of mul
output_var_node = graph._find_node_by_name(graph.all_var_nodes(),
self.new_output[output_name])
attrs = {
name: op_node.op().attr(name)
for name in op_node.op().attr_names()
}
mul_op_node = graph.create_op_node(
op_type='mul',
attrs=attrs,
inputs={'X': input_var_node,
'Y': weight_var_node},
outputs={'Out': output_var_node})
        # Use the QAT scales to compute the scales for the MKL-DNN INT8 mul
scale_in = self.s8_max / self.InScale[output_name]
        scale_w = [self.max_range[output_name] / self.s8_max]
mul_op_node.set_attr("scale_y", scale_w)
mul_op_node.set_attr("scale_x", scale_in)
mul_op_node.set_attr("scale_out", 1.0)
mul_op_node.set_attr("use_mkldnn", 1)
mul_op_node.set_attr("force_fp32_output", 1)
graph.link_to(input_var_node, mul_op_node)
graph.link_to(weight_var_node, mul_op_node)
graph.link_to(mul_op_node, output_var_node)
graph.safe_remove_nodes(op_node)
def _transform_to_quantize_mkldnn(self, graph, op_node):
"""
Transform fake_quantize_xx op to quantize mkldnn op in the graph.
"""
input_var_node = graph._find_node_by_name(op_node.inputs,
op_node.input("X")[0])
output_var_node = graph._find_node_by_name(op_node.outputs,
op_node.output("Out")[0])
scale_in = self.s8_max / self._load_param(
self._scope, op_node.input("InScale")[0])[0]
quant_op_node = graph.create_op_node(
op_type='quantize',
attrs={
'data_format': 'MKLDNNLAYOUT',
'use_mkldnn': 1,
'Scale': scale_in,
'is_negative_input': 1
},
inputs={'Input': input_var_node},
outputs={'Output': output_var_node})
graph.link_to(input_var_node, quant_op_node)
graph.link_to(quant_op_node, output_var_node)
graph.safe_remove_nodes(op_node)
def _remove_fake_dequantize_op(self, graph, op_node):
input_var_node = graph._find_node_by_name(op_node.inputs,
op_node.input("X")[0])
graph.safe_remove_nodes(op_node)
def _load_param(self, scope, param_name):
return np.array(scope.find_var(param_name).get_tensor())
def _restore_var(self, name, array):
tensor = self._scope.find_var(name).get_tensor()
tensor.set(array, self._place)
def _remove_unused_var_nodes(self, graph):
all_used_vars = set()
ops = graph.all_op_nodes()
for op_node in ops:
for input_node in op_node.inputs:
all_used_vars.add(input_node)
for output_node in op_node.outputs:
all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = {
n
for n in filter(lambda node: node.node not in all_used_vars,
graph.all_var_nodes())
}
graph.safe_remove_nodes(all_unused_vars)
|
|
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module is intended to standardize workflows when working with various databases
such as Impala, Postgresql, etc. Even with pep-249 (DB API 2), workflows differ
   slightly. For example, Postgresql does not allow changing databases from within a
   connection; instead a new connection must be made. Impala, on the other hand, does
   not allow specifying a database upon connection; instead a cursor must be created
   and a USE command must be issued.
'''
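# Illustrative usage sketch (not part of the original module). It assumes an
# Impala daemon is reachable on the default host and port, and that a database
# named "tpch" (hypothetical) exists:
#
#   connector = DbConnector(IMPALA)
#   with connector.open_connection(db_name='tpch') as connection:
#     table_names = connection.list_table_names()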
from contextlib import contextmanager
try:
from impala.dbapi import connect as impala_connect
except:
print('Error importing impyla. Please make sure it is installed. '
'See the README for details.')
raise
from itertools import izip
from logging import getLogger
from tests.comparison.model import Column, Table, TYPES, String
LOG = getLogger(__name__)
IMPALA = 'IMPALA'
POSTGRESQL = 'POSTGRESQL'
MYSQL = 'MYSQL'
DATABASES = [IMPALA, POSTGRESQL, MYSQL]
mysql_connect = None
postgresql_connect = None
class DbConnector(object):
'''Wraps a DB API 2 implementation to provide a standard way of obtaining a
connection and selecting a database.
Any database that supports transactions will have auto-commit enabled.
'''
def __init__(self, db_type, user_name=None, password=None, host_name=None, port=None):
self.db_type = db_type.upper()
if self.db_type not in DATABASES:
raise Exception('Unsupported database: %s' % db_type)
self.user_name = user_name
self.password = password
self.host_name = host_name or 'localhost'
self.port = port
def create_connection(self, db_name=None):
if self.db_type == IMPALA:
connection_class = ImpalaDbConnection
connection = impala_connect(host=self.host_name, port=self.port or 21050)
elif self.db_type == POSTGRESQL:
connection_class = PostgresqlDbConnection
connection_args = {'user': self.user_name or 'postgres'}
if self.password:
connection_args['password'] = self.password
if db_name:
connection_args['database'] = db_name
if self.host_name:
connection_args['host'] = self.host_name
if self.port:
connection_args['port'] = self.port
global postgresql_connect
if not postgresql_connect:
try:
from psycopg2 import connect as postgresql_connect
except:
print('Error importing psycopg2. Please make sure it is installed. '
'See the README for details.')
raise
connection = postgresql_connect(**connection_args)
connection.autocommit = True
elif self.db_type == MYSQL:
connection_class = MySQLDbConnection
connection_args = {'user': self.user_name or 'root'}
if self.password:
connection_args['passwd'] = self.password
if db_name:
connection_args['db'] = db_name
if self.host_name:
connection_args['host'] = self.host_name
if self.port:
connection_args['port'] = self.port
global mysql_connect
if not mysql_connect:
try:
from MySQLdb import connect as mysql_connect
except:
print('Error importing MySQLdb. Please make sure it is installed. '
'See the README for details.')
raise
connection = mysql_connect(**connection_args)
else:
raise Exception('Unexpected database type: %s' % self.db_type)
return connection_class(self, connection, db_name=db_name)
@contextmanager
def open_connection(self, db_name=None):
connection = None
try:
connection = self.create_connection(db_name=db_name)
yield connection
finally:
if connection:
try:
connection.close()
except Exception as e:
LOG.debug('Error closing connection: %s', e, exc_info=True)
class DbConnection(object):
'''Wraps a DB API 2 connection. Instances should only be obtained through the
DbConnector.create_connection(...) method.
'''
@staticmethod
def describe_common_tables(db_connections, filter_col_types=[]):
'''Find and return a list of Table objects that the given connections have in
common.
@param filter_col_types: Ignore any cols if they are of a data type contained
in this collection.
'''
common_table_names = None
for db_connection in db_connections:
table_names = set(db_connection.list_table_names())
if common_table_names is None:
common_table_names = table_names
else:
common_table_names &= table_names
common_table_names = sorted(common_table_names)
tables = list()
for table_name in common_table_names:
common_table = None
mismatch = False
for db_connection in db_connections:
table = db_connection.describe_table(table_name)
table.cols = [col for col in table.cols if col.type not in filter_col_types]
if common_table is None:
common_table = table
continue
if len(common_table.cols) != len(table.cols):
LOG.debug('Ignoring table %s.'
' It has a different number of columns across databases.', table_name)
mismatch = True
break
for left, right in izip(common_table.cols, table.cols):
          if left.name != right.name or left.type != right.type:
LOG.debug('Ignoring table %s. It has different columns %s vs %s.' %
(table_name, left, right))
mismatch = True
break
if mismatch:
break
if not mismatch:
tables.append(common_table)
return tables
def __init__(self, connector, connection, db_name=None):
self.connector = connector
self.connection = connection
self.db_name = db_name
@property
def db_type(self):
return self.connector.db_type
def create_cursor(self):
return DatabaseCursor(self.connection.cursor(), self)
@contextmanager
def open_cursor(self):
'''Returns a new cursor for use in a "with" statement. When the "with" statement ends,
the cursor will be closed.
'''
cursor = None
try:
cursor = self.create_cursor()
yield cursor
finally:
self.close_cursor_quietly(cursor)
def close_cursor_quietly(self, cursor):
if cursor:
try:
cursor.close()
except Exception as e:
LOG.debug('Error closing cursor: %s', e, exc_info=True)
def list_db_names(self):
'''Return a list of database names always in lowercase.'''
rows = self.execute_and_fetchall(self.make_list_db_names_sql())
return [row[0].lower() for row in rows]
def make_list_db_names_sql(self):
return 'SHOW DATABASES'
def list_table_names(self):
'''Return a list of table names always in lowercase.'''
rows = self.execute_and_fetchall(self.make_list_table_names_sql())
return [row[0].lower() for row in rows]
def make_list_table_names_sql(self):
return 'SHOW TABLES'
def describe_table(self, table_name):
'''Return a Table with table and col names always in lowercase.'''
rows = self.execute_and_fetchall(self.make_describe_table_sql(table_name))
table = Table(table_name.lower())
for row in rows:
col_name, data_type = row[:2]
table.cols.append(Column(table, col_name.lower(), self.parse_data_type(data_type)))
return table
def make_describe_table_sql(self, table_name):
return 'DESCRIBE ' + table_name
def parse_data_type(self, sql):
sql = sql.upper()
# Types may have declared a database specific alias
for type_ in TYPES:
if sql in getattr(type_, self.db_type, []):
return type_
for type_ in TYPES:
if type_.__name__.upper() == sql:
return type_
if 'CHAR' in sql:
return String
raise Exception('Unknown data type: ' + sql)
def create_database(self, db_name):
db_name = db_name.lower()
with self.open_cursor() as cursor:
cursor.execute('CREATE DATABASE ' + db_name)
def drop_db_if_exists(self, db_name):
'''This should not be called from a connection to the database being dropped.'''
db_name = db_name.lower()
if db_name not in self.list_db_names():
return
if self.db_name and self.db_name.lower() == db_name:
raise Exception('Cannot drop database while still connected to it')
self.drop_database(db_name)
def drop_database(self, db_name):
db_name = db_name.lower()
self.execute('DROP DATABASE ' + db_name)
@property
def supports_index_creation(self):
return True
def index_table(self, table_name):
table = self.describe_table(table_name)
with self.open_cursor() as cursor:
for col in table.cols:
index_name = '%s_%s' % (table_name, col.name)
if self.db_name:
index_name = '%s_%s' % (self.db_name, index_name)
cursor.execute('CREATE INDEX %s ON %s(%s)' % (index_name, table_name, col.name))
@property
def supports_kill_connection(self):
return False
def kill_connection(self):
    '''Kill the current connection and any currently running queries associated with the
connection.
'''
raise Exception('Killing connection is not supported')
def materialize_query(self, query_as_text, table_name):
self.execute('CREATE TABLE %s AS %s' % (table_name.lower(), query_as_text))
def drop_table(self, table_name):
self.execute('DROP TABLE ' + table_name.lower())
def execute(self, sql):
with self.open_cursor() as cursor:
cursor.execute(sql)
def execute_and_fetchall(self, sql):
with self.open_cursor() as cursor:
cursor.execute(sql)
return cursor.fetchall()
def close(self):
'''Close the underlying connection.'''
self.connection.close()
def reconnect(self):
self.close()
other = self.connector.create_connection(db_name=self.db_name)
self.connection = other.connection
class DatabaseCursor(object):
'''Wraps a DB API 2 cursor to provide access to the related connection. This class
implements the DB API 2 interface by delegation.
'''
def __init__(self, cursor, connection):
self.cursor = cursor
self.connection = connection
def __getattr__(self, attr):
return getattr(self.cursor, attr)
class ImpalaDbConnection(DbConnection):
def create_cursor(self):
cursor = DbConnection.create_cursor(self)
if self.db_name:
cursor.execute('USE %s' % self.db_name)
return cursor
def drop_database(self, db_name):
'''This should not be called from a connection to the database being dropped.'''
db_name = db_name.lower()
with self.connector.open_connection(db_name) as list_tables_connection:
with list_tables_connection.open_cursor() as drop_table_cursor:
for table_name in list_tables_connection.list_table_names():
drop_table_cursor.execute('DROP TABLE ' + table_name)
self.execute('DROP DATABASE ' + db_name)
@property
def supports_index_creation(self):
return False
class PostgresqlDbConnection(DbConnection):
def make_list_db_names_sql(self):
return 'SELECT datname FROM pg_database'
def make_list_table_names_sql(self):
return '''
SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public' '''
def make_describe_table_sql(self, table_name):
return '''
SELECT column_name, data_type
FROM information_schema.columns
WHERE table_name = '%s'
ORDER BY ordinal_position''' % table_name
class MySQLDbConnection(DbConnection):
def __init__(self, connector, connection, db_name=None):
DbConnection.__init__(self, connector, connection, db_name=db_name)
self.session_id = self.execute_and_fetchall('SELECT connection_id()')[0][0]
def describe_table(self, table_name):
'''Return a Table with table and col names always in lowercase.'''
rows = self.execute_and_fetchall(self.make_describe_table_sql(table_name))
table = Table(table_name.lower())
for row in rows:
col_name, data_type = row[:2]
if data_type == 'tinyint(1)':
# Just assume this is a boolean...
data_type = 'boolean'
if '(' in data_type:
# Strip the size of the data type
data_type = data_type[:data_type.index('(')]
table.cols.append(Column(table, col_name.lower(), self.parse_data_type(data_type)))
return table
@property
def supports_kill_connection(self):
return True
def kill_connection(self):
with self.connector.open_connection(db_name=self.db_name) as connection:
connection.execute('KILL %s' % (self.session_id))
def index_table(self, table_name):
table = self.describe_table(table_name)
with self.open_cursor() as cursor:
for col in table.cols:
try:
cursor.execute('ALTER TABLE %s ADD INDEX (%s)' % (table_name, col.name))
except Exception as e:
if 'Incorrect index name' not in str(e):
raise
# Some sort of MySQL bug...
LOG.warn('Could not create index on %s.%s: %s' % (table_name, col.name, e))
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import itertools
from unittest import mock
from http import HTTPStatus
from oslo_config import cfg
from oslo_serialization import jsonutils
from watcher.applier import rpcapi as aapi
from watcher.common import utils
from watcher.db import api as db_api
from watcher import objects
from watcher.tests.api import base as api_base
from watcher.tests.objects import utils as obj_utils
class TestListActionPlan(api_base.FunctionalTest):
def setUp(self):
super(TestListActionPlan, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
def test_empty(self):
response = self.get_json('/action_plans')
self.assertEqual([], response['action_plans'])
def _assert_action_plans_fields(self, action_plan):
action_plan_fields = [
'uuid', 'audit_uuid', 'strategy_uuid', 'strategy_name',
'state', 'global_efficacy', 'efficacy_indicators']
for field in action_plan_fields:
self.assertIn(field, action_plan)
def test_one(self):
action_plan = obj_utils.create_test_action_plan(self.context)
response = self.get_json('/action_plans')
self.assertEqual(action_plan.uuid,
response['action_plans'][0]["uuid"])
self._assert_action_plans_fields(response['action_plans'][0])
def test_one_soft_deleted(self):
action_plan = obj_utils.create_test_action_plan(self.context)
action_plan.soft_delete()
response = self.get_json('/action_plans',
headers={'X-Show-Deleted': 'True'})
self.assertEqual(action_plan.uuid,
response['action_plans'][0]["uuid"])
self._assert_action_plans_fields(response['action_plans'][0])
response = self.get_json('/action_plans')
self.assertEqual([], response['action_plans'])
def test_get_one_ok(self):
action_plan = obj_utils.create_test_action_plan(self.context)
obj_utils.create_test_efficacy_indicator(
self.context, action_plan_id=action_plan['id'])
response = self.get_json('/action_plans/%s' % action_plan['uuid'])
self.assertEqual(action_plan.uuid, response['uuid'])
self._assert_action_plans_fields(response)
self.assertEqual(
[{'description': 'Test indicator',
'name': 'test_indicator',
'value': 0.0,
'unit': '%'}],
response['efficacy_indicators'])
def test_get_one_soft_deleted(self):
action_plan = obj_utils.create_test_action_plan(self.context)
action_plan.soft_delete()
response = self.get_json('/action_plans/%s' % action_plan['uuid'],
headers={'X-Show-Deleted': 'True'})
self.assertEqual(action_plan.uuid, response['uuid'])
self._assert_action_plans_fields(response)
response = self.get_json('/action_plans/%s' % action_plan['uuid'],
expect_errors=True)
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int)
def test_detail(self):
action_plan = obj_utils.create_test_action_plan(self.context)
response = self.get_json('/action_plans/detail')
self.assertEqual(action_plan.uuid,
response['action_plans'][0]["uuid"])
self._assert_action_plans_fields(response['action_plans'][0])
def test_detail_soft_deleted(self):
action_plan = obj_utils.create_test_action_plan(self.context)
action_plan.soft_delete()
response = self.get_json('/action_plans/detail',
headers={'X-Show-Deleted': 'True'})
self.assertEqual(action_plan.uuid,
response['action_plans'][0]["uuid"])
self._assert_action_plans_fields(response['action_plans'][0])
response = self.get_json('/action_plans/detail')
self.assertEqual([], response['action_plans'])
def test_detail_against_single(self):
action_plan = obj_utils.create_test_action_plan(self.context)
response = self.get_json(
'/action_plan/%s/detail' % action_plan['uuid'],
expect_errors=True)
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int)
def test_many(self):
action_plan_list = []
for id_ in range(5):
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
action_plan_list.append(action_plan.uuid)
response = self.get_json('/action_plans')
self.assertEqual(len(action_plan_list), len(response['action_plans']))
uuids = [s['uuid'] for s in response['action_plans']]
self.assertEqual(sorted(action_plan_list), sorted(uuids))
def test_many_with_soft_deleted_audit_uuid(self):
action_plan_list = []
audit1 = obj_utils.create_test_audit(
self.context, id=2,
uuid=utils.generate_uuid(), name='My Audit {0}'.format(2))
audit2 = obj_utils.create_test_audit(
self.context, id=3,
uuid=utils.generate_uuid(), name='My Audit {0}'.format(3))
for id_ in range(0, 2):
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit1.id)
action_plan_list.append(action_plan.uuid)
for id_ in range(2, 4):
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit2.id)
action_plan_list.append(action_plan.uuid)
new_state = objects.audit.State.CANCELLED
self.patch_json(
'/audits/%s' % audit1.uuid,
[{'path': '/state', 'value': new_state,
'op': 'replace'}])
self.delete('/audits/%s' % audit1.uuid)
response = self.get_json('/action_plans')
self.assertEqual(len(action_plan_list), len(response['action_plans']))
for id_ in range(0, 2):
action_plan = response['action_plans'][id_]
self.assertIsNone(action_plan['audit_uuid'])
for id_ in range(2, 4):
action_plan = response['action_plans'][id_]
self.assertEqual(audit2.uuid, action_plan['audit_uuid'])
def test_many_with_audit_uuid(self):
action_plan_list = []
audit = obj_utils.create_test_audit(
self.context, id=2,
uuid=utils.generate_uuid(), name='My Audit {0}'.format(2))
for id_ in range(2, 5):
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit.id)
action_plan_list.append(action_plan.uuid)
response = self.get_json('/action_plans')
self.assertEqual(len(action_plan_list), len(response['action_plans']))
for action in response['action_plans']:
self.assertEqual(audit.uuid, action['audit_uuid'])
def test_many_with_audit_uuid_filter(self):
action_plan_list1 = []
audit1 = obj_utils.create_test_audit(
self.context, id=2,
uuid=utils.generate_uuid(), name='My Audit {0}'.format(2))
for id_ in range(2, 5):
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit1.id)
action_plan_list1.append(action_plan.uuid)
audit2 = obj_utils.create_test_audit(
self.context, id=3,
uuid=utils.generate_uuid(), name='My Audit {0}'.format(3))
action_plan_list2 = []
for id_ in [5, 6, 7]:
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit2.id)
action_plan_list2.append(action_plan.uuid)
response = self.get_json('/action_plans?audit_uuid=%s' % audit2.uuid)
self.assertEqual(len(action_plan_list2), len(response['action_plans']))
for action in response['action_plans']:
self.assertEqual(audit2.uuid, action['audit_uuid'])
def test_many_without_soft_deleted(self):
action_plan_list = []
for id_ in [1, 2, 3]:
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
action_plan_list.append(action_plan.uuid)
for id_ in [4, 5]:
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
action_plan.soft_delete()
response = self.get_json('/action_plans')
self.assertEqual(3, len(response['action_plans']))
uuids = [s['uuid'] for s in response['action_plans']]
self.assertEqual(sorted(action_plan_list), sorted(uuids))
def test_many_with_soft_deleted(self):
action_plan_list = []
for id_ in [1, 2, 3]:
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
action_plan_list.append(action_plan.uuid)
for id_ in [4, 5]:
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
action_plan.soft_delete()
action_plan_list.append(action_plan.uuid)
response = self.get_json('/action_plans',
headers={'X-Show-Deleted': 'True'})
self.assertEqual(5, len(response['action_plans']))
uuids = [s['uuid'] for s in response['action_plans']]
self.assertEqual(sorted(action_plan_list), sorted(uuids))
def test_many_with_sort_key_audit_uuid(self):
audit_list = []
for id_ in range(2, 5):
audit = obj_utils.create_test_audit(
self.context, id=id_,
uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_))
obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit.id)
audit_list.append(audit.uuid)
response = self.get_json('/action_plans/?sort_key=audit_uuid')
self.assertEqual(3, len(response['action_plans']))
uuids = [s['audit_uuid'] for s in response['action_plans']]
self.assertEqual(sorted(audit_list), uuids)
def test_sort_key_validation(self):
response = self.get_json(
'/action_plans?sort_key=%s' % 'bad_name',
expect_errors=True)
self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
def test_links(self):
uuid = utils.generate_uuid()
obj_utils.create_test_action_plan(self.context, id=1, uuid=uuid)
response = self.get_json('/action_plans/%s' % uuid)
self.assertIn('links', response.keys())
self.assertEqual(2, len(response['links']))
self.assertIn(uuid, response['links'][0]['href'])
for l in response['links']:
bookmark = l['rel'] == 'bookmark'
self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
def test_collection_links(self):
for id_ in range(5):
obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
response = self.get_json('/action_plans/?limit=3')
self.assertEqual(3, len(response['action_plans']))
next_marker = response['action_plans'][-1]['uuid']
self.assertIn(next_marker, response['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
for id_ in range(5):
obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
response = self.get_json('/action_plans')
self.assertEqual(3, len(response['action_plans']))
next_marker = response['action_plans'][-1]['uuid']
self.assertIn(next_marker, response['next'])
class TestDelete(api_base.FunctionalTest):
def setUp(self):
super(TestDelete, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
self.action_plan = obj_utils.create_test_action_plan(
self.context)
p = mock.patch.object(db_api.BaseConnection, 'destroy_action_plan')
self.mock_action_plan_delete = p.start()
self.mock_action_plan_delete.side_effect = \
self._simulate_rpc_action_plan_delete
self.addCleanup(p.stop)
def _simulate_rpc_action_plan_delete(self, audit_uuid):
action_plan = objects.ActionPlan.get_by_uuid(self.context, audit_uuid)
action_plan.destroy()
def test_delete_action_plan_without_action(self):
response = self.delete('/action_plans/%s' % self.action_plan.uuid,
expect_errors=True)
self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
self.action_plan.state = objects.action_plan.State.SUCCEEDED
self.action_plan.save()
self.delete('/action_plans/%s' % self.action_plan.uuid)
response = self.get_json('/action_plans/%s' % self.action_plan.uuid,
expect_errors=True)
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_delete_action_plan_with_action(self):
action = obj_utils.create_test_action(
self.context, id=1)
self.action_plan.state = objects.action_plan.State.SUCCEEDED
self.action_plan.save()
self.delete('/action_plans/%s' % self.action_plan.uuid)
ap_response = self.get_json('/action_plans/%s' % self.action_plan.uuid,
expect_errors=True)
acts_response = self.get_json(
'/actions/?action_plan_uuid=%s' % self.action_plan.uuid)
act_response = self.get_json(
'/actions/%s' % action.uuid,
expect_errors=True)
# The action plan does not exist anymore
self.assertEqual(HTTPStatus.NOT_FOUND, ap_response.status_int)
self.assertEqual('application/json', ap_response.content_type)
self.assertTrue(ap_response.json['error_message'])
# Nor does the action
self.assertEqual(0, len(acts_response['actions']))
self.assertEqual(HTTPStatus.NOT_FOUND, act_response.status_int)
self.assertEqual('application/json', act_response.content_type)
self.assertTrue(act_response.json['error_message'])
def test_delete_action_plan_not_found(self):
uuid = utils.generate_uuid()
response = self.delete('/action_plans/%s' % uuid, expect_errors=True)
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
class TestStart(api_base.FunctionalTest):
def setUp(self):
super(TestStart, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
self.action_plan = obj_utils.create_test_action_plan(
self.context, state=objects.action_plan.State.RECOMMENDED)
p = mock.patch.object(db_api.BaseConnection, 'update_action_plan')
self.mock_action_plan_update = p.start()
self.mock_action_plan_update.side_effect = \
self._simulate_rpc_action_plan_update
self.addCleanup(p.stop)
def _simulate_rpc_action_plan_update(self, action_plan):
action_plan.save()
return action_plan
@mock.patch('watcher.common.policy.enforce')
def test_start_action_plan_not_found(self, mock_policy):
mock_policy.return_value = True
uuid = utils.generate_uuid()
response = self.post('/v1/action_plans/%s/%s' %
(uuid, 'start'), expect_errors=True)
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
@mock.patch('watcher.common.policy.enforce')
def test_start_action_plan(self, mock_policy):
mock_policy.return_value = True
action = obj_utils.create_test_action(
self.context, id=1)
self.action_plan.state = objects.action_plan.State.SUCCEEDED
response = self.post('/v1/action_plans/%s/%s/'
% (self.action_plan.uuid, 'start'),
expect_errors=True)
self.assertEqual(HTTPStatus.OK, response.status_int)
act_response = self.get_json(
'/actions/%s' % action.uuid,
expect_errors=True)
self.assertEqual(HTTPStatus.OK, act_response.status_int)
self.assertEqual('PENDING', act_response.json['state'])
self.assertEqual('application/json', act_response.content_type)
class TestPatch(api_base.FunctionalTest):
def setUp(self):
super(TestPatch, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
self.action_plan = obj_utils.create_test_action_plan(
self.context, state=objects.action_plan.State.RECOMMENDED)
p = mock.patch.object(db_api.BaseConnection, 'update_action_plan')
self.mock_action_plan_update = p.start()
self.mock_action_plan_update.side_effect = \
self._simulate_rpc_action_plan_update
self.addCleanup(p.stop)
def _simulate_rpc_action_plan_update(self, action_plan):
action_plan.save()
return action_plan
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_denied(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
new_state = objects.action_plan.State.DELETED
response = self.get_json(
'/action_plans/%s' % self.action_plan.uuid)
self.assertNotEqual(new_state, response['state'])
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/state', 'value': new_state, 'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
def test_replace_non_existent_action_plan_denied(self):
response = self.patch_json(
'/action_plans/%s' % utils.generate_uuid(),
[{'path': '/state',
'value': objects.action_plan.State.PENDING,
'op': 'replace'}],
expect_errors=True)
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_add_non_existent_property_denied(self):
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/foo', 'value': 'bar', 'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
def test_remove_denied(self):
# We should not be able to remove the state of an action plan
response = self.get_json(
'/action_plans/%s' % self.action_plan.uuid)
self.assertIsNotNone(response['state'])
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/state', 'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
def test_remove_uuid_denied(self):
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/uuid', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_remove_non_existent_property_denied(self):
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/non-existent', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_code)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
@mock.patch.object(aapi.ApplierAPI, 'launch_action_plan')
def test_replace_state_pending_ok(self, applier_mock):
new_state = objects.action_plan.State.PENDING
response = self.get_json(
'/action_plans/%s' % self.action_plan.uuid)
self.assertNotEqual(new_state, response['state'])
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/state', 'value': new_state,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(HTTPStatus.OK, response.status_code)
applier_mock.assert_called_once_with(mock.ANY,
self.action_plan.uuid)
ALLOWED_TRANSITIONS = [
{"original_state": objects.action_plan.State.RECOMMENDED,
"new_state": objects.action_plan.State.PENDING},
{"original_state": objects.action_plan.State.RECOMMENDED,
"new_state": objects.action_plan.State.CANCELLED},
{"original_state": objects.action_plan.State.ONGOING,
"new_state": objects.action_plan.State.CANCELLING},
{"original_state": objects.action_plan.State.PENDING,
"new_state": objects.action_plan.State.CANCELLED},
]
class TestPatchStateTransitionDenied(api_base.FunctionalTest):
STATES = [
ap_state for ap_state in objects.action_plan.State.__dict__
if not ap_state.startswith("_")
]
scenarios = [
(
"%s -> %s" % (original_state, new_state),
{"original_state": original_state,
"new_state": new_state},
)
for original_state, new_state
in list(itertools.product(STATES, STATES))
# from DELETED to ...
# NOTE: Any state transition from DELETED (To RECOMMENDED, PENDING,
# ONGOING, CANCELLED, SUCCEEDED and FAILED) will cause a 404 Not Found
# because we cannot retrieve them with a GET (soft_deleted state).
# This is the reason why they are not listed here but they have a
# special test to cover it
if original_state != objects.action_plan.State.DELETED and
original_state != new_state and
{"original_state": original_state,
"new_state": new_state} not in ALLOWED_TRANSITIONS
]
def setUp(self):
super(TestPatchStateTransitionDenied, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
@mock.patch.object(
db_api.BaseConnection, 'update_action_plan',
mock.Mock(side_effect=lambda ap: ap.save() or ap))
def test_replace_state_pending_denied(self):
action_plan = obj_utils.create_test_action_plan(
self.context, state=self.original_state)
initial_ap = self.get_json('/action_plans/%s' % action_plan.uuid)
response = self.patch_json(
'/action_plans/%s' % action_plan.uuid,
[{'path': '/state', 'value': self.new_state,
'op': 'replace'}],
expect_errors=True)
updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid)
self.assertNotEqual(self.new_state, initial_ap['state'])
self.assertEqual(self.original_state, updated_ap['state'])
self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_code)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
class TestPatchStateTransitionOk(api_base.FunctionalTest):
scenarios = [
(
"%s -> %s" % (transition["original_state"],
transition["new_state"]),
transition
)
for transition in ALLOWED_TRANSITIONS
]
def setUp(self):
super(TestPatchStateTransitionOk, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
@mock.patch.object(
db_api.BaseConnection, 'update_action_plan',
mock.Mock(side_effect=lambda ap: ap.save() or ap))
@mock.patch.object(aapi.ApplierAPI, 'launch_action_plan', mock.Mock())
def test_replace_state_pending_ok(self):
action_plan = obj_utils.create_test_action_plan(
self.context, state=self.original_state)
initial_ap = self.get_json('/action_plans/%s' % action_plan.uuid)
response = self.patch_json(
'/action_plans/%s' % action_plan.uuid,
[{'path': '/state', 'value': self.new_state, 'op': 'replace'}])
updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid)
self.assertNotEqual(self.new_state, initial_ap['state'])
self.assertEqual(self.new_state, updated_ap['state'])
self.assertEqual('application/json', response.content_type)
self.assertEqual(HTTPStatus.OK, response.status_code)
class TestActionPlanPolicyEnforcement(api_base.FunctionalTest):
def setUp(self):
super(TestActionPlanPolicyEnforcement, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
def _common_policy_check(self, rule, func, *arg, **kwarg):
self.policy.set_rules({
"admin_api": "(role:admin or role:administrator)",
"default": "rule:admin_api",
rule: "rule:defaut"})
response = func(*arg, **kwarg)
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
self.assertEqual('application/json', response.content_type)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule,
            jsonutils.loads(response.json['error_message'])['faultstring'])
def test_policy_disallow_get_all(self):
self._common_policy_check(
"action_plan:get_all", self.get_json, '/action_plans',
expect_errors=True)
def test_policy_disallow_get_one(self):
action_plan = obj_utils.create_test_action_plan(self.context)
self._common_policy_check(
"action_plan:get", self.get_json,
'/action_plans/%s' % action_plan.uuid,
expect_errors=True)
def test_policy_disallow_detail(self):
self._common_policy_check(
"action_plan:detail", self.get_json,
'/action_plans/detail',
expect_errors=True)
def test_policy_disallow_update(self):
action_plan = obj_utils.create_test_action_plan(self.context)
self._common_policy_check(
"action_plan:update", self.patch_json,
'/action_plans/%s' % action_plan.uuid,
[{'path': '/state',
'value': objects.action_plan.State.DELETED,
'op': 'replace'}],
expect_errors=True)
def test_policy_disallow_delete(self):
action_plan = obj_utils.create_test_action_plan(self.context)
self._common_policy_check(
"action_plan:delete", self.delete,
'/action_plans/%s' % action_plan.uuid, expect_errors=True)
class TestActionPlanPolicyEnforcementWithAdminContext(TestListActionPlan,
api_base.AdminRoleTest):
def setUp(self):
super(TestActionPlanPolicyEnforcementWithAdminContext, self).setUp()
self.policy.set_rules({
"admin_api": "(role:admin or role:administrator)",
"default": "rule:admin_api",
"action_plan:delete": "rule:default",
"action_plan:detail": "rule:default",
"action_plan:get": "rule:default",
"action_plan:get_all": "rule:default",
"action_plan:update": "rule:default",
"action_plan:start": "rule:default"})
|
|
from enum import Enum
from graph.pad import Pad
class FilterState(Enum):
"""
This enum contains the various states a filter can be in.
Run Flow:
- Graph manager calls "run" on a filter
- Filter transitions immediately to run_pending
    - When the filter completes the transition, it sets its internal state to running and informs the graph manager via filter_changed_state
- When the graph manager sees all filters have transitioned to running, it calls graph_is_running on each filter
Stop Flow:
- Graph manager calls "stop" on a filter
- Filter transitions immediately to stop_pending
    - When the filter completes the transition, it sets its internal state to stopped and informs the graph manager via filter_changed_state
- From this point on, the filter has flushed all pending internal processing and can no longer generate output
- When the graph manager sees all filters have transitioned to stopped, it calls graph_has_stopped on each filter
"""
stopped = 0
run_pending = 1
running = 2
stop_pending = 3
# The filter has entered an unexpected error state - maybe delete and reallocate a new one
error = 4
class FilterType(Enum):
"""
This enum contains the types of filter.
    source: Filter is used to generate data to be passed down the pipeline.
Examples: File reader, HTTP get, web server, network socket, constants and literal values.
transform: Filter is used to optionally change the format or content of the payload.
Examples: Logger, XML -> JSON converter, Base64 encoder / decoder, Protocol buf serializer.
sink: Filter is used to write or send data elsewhere.
Examples: File writer, HTTP post, sqlite db, postgresql db, network socket, etc.
A valid graph must contain at least one source and one sink.
TODO: How do we handle filters that can be both like a network socket?
"""
# TODO: Deprecated
source = 1
sink = 2
source_sink = 3
transform = 4
class FilterBase:
"""
Rosetta graph filter base object
"""
#### Filter ranks
FILTER_RANK_NONE = 0
FILTER_RANK_MARGINAL = 1
FILTER_RANK_SECONDARY = 2
FILTER_RANK_PRIMARY = 3
#### Filter metadata
# The fully qualified name of the filter, ex: com.urbtek.logger_sink
FILTER_META_FULLY_QUALIFIED = 'FILTER_META_FULLY_QUALIFIED'
# The name of the filter
FILTER_META_NAME = 'FILTER_META_NAME'
# Description
FILTER_META_DESC = 'FILTER_META_DESC'
# Version in major.minor.revision format
FILTER_META_VER = 'FILTER_META_VER'
# How likely this filter is to be automatically plugged into a graph (0-255)
FILTER_META_RANK = 'FILTER_META_RANK'
# Where did this thing come from?
FILTER_META_ORIGIN_URL = 'FILTER_META_ORIGIN_URL'
# Original author
FILTER_META_AUTHOR = 'FILTER_META_AUTHOR'
# Klass id - used for finding classes of filters. Ex: Source/DB, Sink/Network/Protocol/Device
FILTER_META_KLASS = 'FILTER_META_KLASS'
#### Default names
DEFAULT_SOURCE_PAD_NAME = 'src'
DEFAULT_SINK_PAD_NAME = 'sink'
def __init__(self, name, config_dict, graph_manager, filter_type):
self._filter_name = name
self._config_dict = config_dict
self._source_pads = {}
self._sink_pads = {}
# TODO: Deprecated
self._input_pins = {}
self._output_pins = {}
self._filter_type = filter_type
self._graph_manager = graph_manager
self._filter_state = FilterState.stopped
# This is the only protocol available now, might change in the future
self._protocol_version = 1
# A filter is continuous if it can generate multiple output events over a normal lifetime.
# Ex: A web server filter or network socket filter
# A filter is not continuous if it usually generates a single output event.
# Ex: A file reader filter
        # A graph that contains no continuous filters is able to run in one-shot mode
self._is_continuous = False
@property
def protocol_version(self):
return self._protocol_version
@property
def filter_name(self):
return self._filter_name
@property
def filter_state(self):
return self._filter_state
# TODO: Deprecated
@property
def filter_type(self):
return self._filter_type
# TODO: Deprecated
@property
def is_continuous(self):
return self._is_continuous
# TODO: Deprecated
def get_input_pin(self, input_pin_name):
"""
Return a reference to the input pin by name
:param input_pin_name: The name of the input pin to retrieve
:return: An input pin reference or None if not found
"""
ipin = self._input_pins.get(input_pin_name)
return ipin
# TODO: Deprecated
def get_all_input_pins(self):
return self._input_pins.items()
# TODO: Deprecated
def get_output_pin(self, output_pin_name):
"""
        Return a reference to the output pin by name
        :param output_pin_name: The name of the output pin to retrieve
        :return: An output pin reference or None if not found
"""
opin = self._output_pins.get(output_pin_name)
return opin
# TODO: Deprecated
def get_all_output_pins(self):
return self._output_pins.items()
def get_sink_pad(self, sink_pad_name):
"""
Return a reference to the sink pad by name
:param sink_pad_name: The name of the pad to retrieve
        :return: A sink pad reference or None if not found
"""
sink_pad = self._sink_pads.get(sink_pad_name)
return sink_pad
def get_all_sink_pads(self):
return self._sink_pads.items()
def get_source_pad(self, source_pad_name):
"""
Return a reference to the source pad by name
:param source_pad_name: The name of the source pad to retrieve
        :return: A source pad reference or None if not found
"""
source_pad = self._source_pads.get(source_pad_name)
return source_pad
def get_all_source_pads(self):
return self._source_pads.items()
def run(self):
"""
Called by the graph manager (only) when the graph is transitioning to running
:return:
"""
if self._filter_state != FilterState.stopped and self._filter_state != FilterState.stop_pending:
raise RuntimeError("Attempt to run a filter that is not in the stopped state")
def graph_is_running(self):
"""
Called by the graph manager (only) when the graph has transitioned to running
:return:
"""
pass
def stop(self):
"""
Called by the graph manager (only) when the graph is transitioning to stopping
:return:
"""
if self._filter_state != FilterState.running and self._filter_state != FilterState.run_pending:
raise RuntimeError("Attempt to stop a filter that is not in the running state")
def graph_has_stopped(self):
"""
Called by the graph manager (only) when the graph has transitioned to stopped
:return:
"""
pass
def _create_always_pads_from_template(self, template_dict):
"""
Create the always available pads from the templates defined for this filter
:return: None
"""
for key, val in template_dict.items():
if val.is_present_always():
new_pad = Pad.create_pad_from_template(val, key)
if new_pad.is_src:
self._source_pads[key] = new_pad
elif new_pad.is_sink:
self._sink_pads[key] = new_pad
else:
raise Exception("Cannot add an unknown pad type from the source template")
def _add_input_pin(self, input_pin):
"""
Add an input pin to the collection
:param input_pin: A reference to the input pin to be added
:return: None
"""
pin_name = input_pin.pin_name
self._input_pins[pin_name] = input_pin
def _add_output_pin(self, output_pin):
"""
Add an output pin to the collection
:param output_pin: A reference to the output pin to be added
:return: None
"""
pin_name = output_pin.pin_name
self._output_pins[pin_name] = output_pin
def _set_filter_state(self, new_state):
"""
Set the filter to a new state and inform the graph manager
:param new_state: The new state that we're transitioning to
:return: None
"""
self._filter_state = new_state
self._graph_manager.filter_changed_state(self)
def _cycle_started(self):
"""
Called by source filters to mark the start of a cycle
:return:
"""
if self._filter_type == FilterType.source:
self._graph_manager.cycle_started(self)
else:
raise RuntimeError("Could not start cycle because this is not a source filter - {0}".format(self.filter_name))
def _cycle_ended(self):
"""
Called by sink filters to mark the end of a cycle
:return:
"""
if self._filter_type == FilterType.sink:
self._graph_manager.cycle_ended(self)
else:
raise RuntimeError("Could not end cycle because this is not a sink filter - {0}".format(self.filter_name))
|
|
"""Support for Homekit switches."""
from __future__ import annotations
from dataclasses import dataclass
from aiohomekit.model.characteristics import (
Characteristic,
CharacteristicsTypes,
InUseValues,
IsConfiguredValues,
)
from aiohomekit.model.services import ServicesTypes
from homeassistant.components.switch import SwitchEntity, SwitchEntityDescription
from homeassistant.core import callback
from homeassistant.helpers.entity import EntityCategory
from . import KNOWN_DEVICES, CharacteristicEntity, HomeKitEntity
OUTLET_IN_USE = "outlet_in_use"
ATTR_IN_USE = "in_use"
ATTR_IS_CONFIGURED = "is_configured"
ATTR_REMAINING_DURATION = "remaining_duration"
@dataclass
class DeclarativeSwitchEntityDescription(SwitchEntityDescription):
"""Describes Homekit button."""
true_value: bool = True
false_value: bool = False
SWITCH_ENTITIES: dict[str, DeclarativeSwitchEntityDescription] = {
CharacteristicsTypes.Vendor.AQARA_PAIRING_MODE: DeclarativeSwitchEntityDescription(
key=CharacteristicsTypes.Vendor.AQARA_PAIRING_MODE,
name="Pairing Mode",
icon="mdi:lock-open",
entity_category=EntityCategory.CONFIG,
),
CharacteristicsTypes.Vendor.AQARA_E1_PAIRING_MODE: DeclarativeSwitchEntityDescription(
key=CharacteristicsTypes.Vendor.AQARA_E1_PAIRING_MODE,
name="Pairing Mode",
icon="mdi:lock-open",
entity_category=EntityCategory.CONFIG,
),
}
class HomeKitSwitch(HomeKitEntity, SwitchEntity):
"""Representation of a Homekit switch."""
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
return [CharacteristicsTypes.ON, CharacteristicsTypes.OUTLET_IN_USE]
@property
def is_on(self):
"""Return true if device is on."""
return self.service.value(CharacteristicsTypes.ON)
async def async_turn_on(self, **kwargs):
"""Turn the specified switch on."""
await self.async_put_characteristics({CharacteristicsTypes.ON: True})
async def async_turn_off(self, **kwargs):
"""Turn the specified switch off."""
await self.async_put_characteristics({CharacteristicsTypes.ON: False})
@property
def extra_state_attributes(self):
"""Return the optional state attributes."""
outlet_in_use = self.service.value(CharacteristicsTypes.OUTLET_IN_USE)
if outlet_in_use is not None:
return {OUTLET_IN_USE: outlet_in_use}
class HomeKitValve(HomeKitEntity, SwitchEntity):
"""Represents a valve in an irrigation system."""
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
return [
CharacteristicsTypes.ACTIVE,
CharacteristicsTypes.IN_USE,
CharacteristicsTypes.IS_CONFIGURED,
CharacteristicsTypes.REMAINING_DURATION,
]
async def async_turn_on(self, **kwargs):
"""Turn the specified valve on."""
await self.async_put_characteristics({CharacteristicsTypes.ACTIVE: True})
async def async_turn_off(self, **kwargs):
"""Turn the specified valve off."""
await self.async_put_characteristics({CharacteristicsTypes.ACTIVE: False})
@property
def icon(self) -> str:
"""Return the icon."""
return "mdi:water"
@property
def is_on(self):
"""Return true if device is on."""
return self.service.value(CharacteristicsTypes.ACTIVE)
@property
def extra_state_attributes(self):
"""Return the optional state attributes."""
attrs = {}
in_use = self.service.value(CharacteristicsTypes.IN_USE)
if in_use is not None:
attrs[ATTR_IN_USE] = in_use == InUseValues.IN_USE
is_configured = self.service.value(CharacteristicsTypes.IS_CONFIGURED)
if is_configured is not None:
attrs[ATTR_IS_CONFIGURED] = is_configured == IsConfiguredValues.CONFIGURED
remaining = self.service.value(CharacteristicsTypes.REMAINING_DURATION)
if remaining is not None:
attrs[ATTR_REMAINING_DURATION] = remaining
return attrs
class DeclarativeCharacteristicSwitch(CharacteristicEntity, SwitchEntity):
"""Representation of a Homekit switch backed by a single characteristic."""
def __init__(
self,
conn,
info,
char,
description: DeclarativeSwitchEntityDescription,
):
"""Initialise a HomeKit switch."""
self.entity_description = description
super().__init__(conn, info, char)
@property
def name(self) -> str:
"""Return the name of the device if any."""
if prefix := super().name:
return f"{prefix} {self.entity_description.name}"
return self.entity_description.name
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
return [self._char.type]
@property
def is_on(self):
"""Return true if device is on."""
return self._char.value == self.entity_description.true_value
async def async_turn_on(self, **kwargs):
"""Turn the specified switch on."""
await self.async_put_characteristics(
{self._char.type: self.entity_description.true_value}
)
async def async_turn_off(self, **kwargs):
"""Turn the specified switch off."""
await self.async_put_characteristics(
{self._char.type: self.entity_description.false_value}
)
ENTITY_TYPES = {
ServicesTypes.SWITCH: HomeKitSwitch,
ServicesTypes.OUTLET: HomeKitSwitch,
ServicesTypes.VALVE: HomeKitValve,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Homekit switches."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
@callback
def async_add_service(service):
if not (entity_class := ENTITY_TYPES.get(service.short_type)):
return False
info = {"aid": service.accessory.aid, "iid": service.iid}
async_add_entities([entity_class(conn, info)], True)
return True
conn.add_listener(async_add_service)
@callback
def async_add_characteristic(char: Characteristic):
if not (description := SWITCH_ENTITIES.get(char.type)):
return False
info = {"aid": char.service.accessory.aid, "iid": char.service.iid}
async_add_entities(
[DeclarativeCharacteristicSwitch(conn, info, char, description)], True
)
return True
conn.add_char_factory(async_add_characteristic)
|
|
""" This module deals with encoding and decoding of instructions """
import abc
from .registers import Register
from .token import TokenSequence
class Operand(property):
""" An instruction operand.
When an instruction has
an operand, use this function to create the property.
Arguments:
name: The name that will be shown in the usage.
        cls: The type of this operand. May also be a tuple of types, or a
        dict mapping types to encoded values.
Custom derived property that implements the descriptor protocol
by inheriting property
"""
def __init__(self, name, cls, read=False, write=False):
self._name = name
if isinstance(cls, dict):
self._value_map = cls
cls = tuple(cls.keys())
else:
self._value_map = None
self._cls = cls
self._read = read
self._write = write
# if isinstance(cls, type) or isinstance(cls, tuple)
# Construct a private backing field for the property:
private_field = "_{}".format(name)
if isinstance(cls, type) and issubclass(cls, Register):
assert read or write
def getter(self):
return getattr(self, private_field)
def setter(self, value):
assert isinstance(value, cls)
setattr(self, private_field, value)
super().__init__(getter, setter)
def __repr__(self):
return "operand name={}, cls={}".format(self._name, self._cls)
@property
def is_constructor(self):
""" Check if this is a simple type, or a choice for many types """
if isinstance(self._cls, tuple):
return True
else:
return issubclass(self._cls, Constructor)
@property
def source(self):
""" Get the original """
return self
def get_value(self, objref):
""" Get the numerical value of this property """
val = self.__get__(objref)
if isinstance(val, Register):
val = val.num
if self._value_map and not isinstance(val, int):
val = self._value_map[type(val)]
assert isinstance(val, int)
return val
def set_value(self, objref, value):
""" Set the numeric value of this property """
raise NotImplementedError()
def from_value(self, value):
""" Create the an object of the right type from the given value """
if issubclass(self._cls, Register):
regs = self._cls.all_registers()
reg_map = {r.num: r for r in regs}
return reg_map[value]
else:
# assume int here!
return value
class Transform(metaclass=abc.ABCMeta):
""" Wrapper to transform the numeric value of a property """
def __init__(self, wrapped):
self._wrapped = wrapped
@abc.abstractmethod
def forwards(self, value): # pragma: no cover
""" Implement the forward transform here """
raise NotImplementedError()
def backwards(self, value): # pragma: no cover
""" Implement the backward transform here """
raise NotImplementedError()
def get_value(self, obj):
""" Get the numerical value of this property """
val = self._wrapped.get_value(obj)
return self.forwards(val)
def from_value(self, value):
value = self.backwards(value)
return self._wrapped.from_value(value)
@property
def source(self):
""" Get the original data source """
return self._wrapped.source
class Constructor:
""" Instruction, or part of an instruction.
An instruction is a special subclass of a constructor. It is final
, in other words, it cannot be used in Constructors. An instruction
can also be materialized, where as constructors are parts of an
instruction.
A constructor can contain a syntax and can be initialized by using
this syntax.
"""
syntax = None
patterns = ()
def __init__(self, *args, **kwargs):
# Generate constructor from args:
if self.syntax:
formal_args = self.syntax.formal_arguments
# Set parameters:
if len(args) != len(formal_args):
raise TypeError(
"{} arguments given, but {} expects {}".format(
len(args), self.__class__, len(formal_args)
)
)
for farg, arg in zip(formal_args, args):
if not isinstance(arg, farg._cls): # pragma: no cover
# Create some nice looking error:
raise TypeError(
'{} expected {}, but got "{}" of type {}'.format(
type(self), farg._cls, arg, type(arg)
)
)
setattr(self, farg._name, arg)
for pname, pval in kwargs.items():
# print('\n\n\n===', pname, pval)
setattr(self, pname, pval)
def __str__(self):
""" Create a nice looking assembly string """
if self.syntax:
return self.syntax.render(self)
else:
return super().__str__()
@staticmethod
def dict_to_patterns(d):
""" Create patterns from dictionary """
if isinstance(d, dict):
patterns = []
for field, value in d.items():
if isinstance(value, int):
patterns.append(FixedPattern(field, value))
elif isinstance(value, (Operand, Transform)):
patterns.append(VariablePattern(field, value))
else: # pragma: no cover
raise NotImplementedError(str(value))
else:
patterns = d
return patterns
def set_patterns(self, tokens):
""" Fill tokens with the specified bit patterns """
for pattern in self.dict_to_patterns(self.patterns):
value = pattern.get_value(self)
assert isinstance(value, int), str(self) + str(value)
tokens.set_field(pattern.field, value)
self.set_user_patterns(tokens)
def set_user_patterns(self, tokens):
""" This is the place for custom patterns """
pass
@classmethod
def from_tokens(cls, tokens):
""" Create this constructor from tokens """
prop_map = {}
patterns = cls.dict_to_patterns(cls.patterns)
# Fill patterns:
for pattern in patterns:
v = tokens.get_field(pattern.field)
if isinstance(pattern, FixedPattern):
if v != pattern.value:
raise ValueError("Cannot decode {}".format(cls))
elif isinstance(pattern, VariablePattern):
prop_map[pattern.prop.source] = pattern.prop.from_value(v)
else: # pragma: no cover
raise NotImplementedError(pattern)
# Create constructors:
fargs = cls.syntax.formal_arguments
for farg in fargs:
if isinstance(farg._cls, tuple):
options = farg._cls
for sub_con in options:
try:
c = sub_con.from_tokens(tokens)
print(c)
prop_map[farg] = c
except ValueError as e:
print(e)
# Instantiate:
init_args = [prop_map[a] for a in fargs]
return cls(*init_args)
@property
def properties(self):
""" Return all properties available into this syntax """
if not self.syntax:
return []
return self.syntax.formal_arguments
@property
def leaves(self):
""" recursively yield all properties used, expanding composite
props.
        All properties and the objects on which those properties can be
        retrieved are returned.
"""
for prop in self.properties:
if prop.is_constructor:
for propcls in prop.__get__(self).leaves:
yield propcls
else:
yield prop, self
@property
def non_leaves(self):
""" Get all composite parts.
This is a depth first loop.
"""
yield self
for prop in self.properties:
if prop.is_constructor:
s2 = prop.__get__(self)
# yield s2
for nl in s2.non_leaves:
yield nl
def gen_relocations(self):
""" Override this method to generate relocation information """
return []
class InsMeta(type):
""" Meta class to register an instruction within an isa class. """
def __init__(cls, name, bases, attrs):
super(InsMeta, cls).__init__(name, bases, attrs)
# Register instruction with isa:
if hasattr(cls, "isa"):
cls.isa.add_instruction(cls)
def __add__(cls, other):
assert isinstance(other, InsMeta)
tokens = cls.tokens + other.tokens
p1 = cls.dict_to_patterns(cls.patterns)
p2 = cls.dict_to_patterns(other.patterns)
patterns = p1 + p2
syntax = cls.syntax + other.syntax
members = {"tokens": tokens, "patterns": patterns, "syntax": syntax}
member_list = list(cls.__dict__.items())
member_list += list(other.__dict__.items())
for name, val in member_list:
if isinstance(val, Operand):
if name in members: # pragma: no cover
raise ValueError("{} already defined!".format(name))
members[name] = val
name = cls.__name__ + other.__name__
return InsMeta(name, (Instruction,), members)
class Instruction(Constructor, metaclass=InsMeta):
""" Base instruction class.
Instructions are created in the following ways:
- From python code, by using the instruction directly:
self.stream.emit(Mov(r1, r2))
- By the assembler. This is done via a generated parser.
- By the instruction selector. This is done via pattern matching rules
Instructions can then be emitted to output streams.
Instruction classes are automatically added to an
isa if they have an isa attribute.
"""
def __init__(self, *args, **kwargs):
""" Base instruction constructor.
Takes an arbitrary amount of arguments and tries to fit them
on the args or syntax fields.
"""
super().__init__(*args)
# Initialize the jumps this instruction makes:
self.jumps = []
self.ismove = False
# A list of clobbered registers:
self.clobbers = []
# TODO: some instructions, like call, use several registers.
# Probably this can be handled better:
self.extra_uses = []
self.extra_defs = []
# Set several properties:
for k, v in kwargs.items():
assert hasattr(self, k)
setattr(self, k, v)
@property
def used_registers(self):
""" Return a set of all registers used by this instruction """
s = []
for p, o in self.leaves:
if p._read:
s.append(p.__get__(o))
s.extend(self.extra_uses)
return s
def reads_register(self, register):
""" Check if this instruction reads the given register """
return register in self.used_registers
@property
def defined_registers(self):
""" Return a set of all defined registers """
s = []
for p, o in self.leaves:
if p._write:
s.append(p.__get__(o))
s.extend(self.extra_defs)
return s
def writes_register(self, register):
""" Check if this instruction writes the given register """
return register in self.defined_registers
@property
def registers(self):
""" Determine all registers used by this instruction """
for p, o in self.leaves:
if issubclass(p._cls, Register):
yield p.__get__(o)
def set_all_patterns(self, tokens):
""" Look for all patterns and apply them to the tokens """
assert hasattr(self, "patterns")
# self.set_patterns(tokens)
for nl in self.non_leaves:
nl.set_patterns(tokens)
def replace_register(self, old, new):
""" Replace a register usage with another register """
for p, o in self.leaves:
if issubclass(p._cls, Register):
if p.__get__(o) is old:
p.__set__(o, new)
def get_tokens(self):
precodes = []
tokens = []
for nl in self.non_leaves:
if hasattr(nl, "tokens"):
for tc in nl.tokens:
t = tc()
if t.Info.precode:
precodes.append(t)
else:
tokens.append(t)
return TokenSequence(precodes + tokens)
def get_positions(self):
""" Calculate the positions in the byte stream of all parts """
pos = 0
positions = {}
for nl in self.non_leaves:
positions[nl] = pos
if hasattr(nl, "tokens"):
tokens = getattr(nl, "tokens")
                # TODO: this position might not be what is expected!
size = sum(t.Info.size for t in tokens) // 8
else:
size = 0
pos += size
return positions
# Interface methods:
def encode(self):
""" Encode the instruction into binary form.
returns bytes for this instruction.
"""
tokens = self.get_tokens()
self.set_all_patterns(tokens)
return tokens.encode()
@classmethod
def decode(cls, data):
""" Decode data into an instruction of this class """
tokens = [tok_cls() for tok_cls in cls.tokens]
tokens = TokenSequence(tokens)
tokens.fill(data)
return cls.from_tokens(tokens)
@classmethod
def sizes(cls):
""" Get possible encoding sizes in bytes """
if hasattr(cls, "tokens"):
return [sum(t.size for t in cls.tokens) // 8]
else:
return []
def relocations(self):
""" Determine the total set of relocations for this instruction """
relocs = []
positions = self.get_positions()
for nl, offset in positions.items():
for reloc in nl.gen_relocations():
relocs.append(reloc.shifted(offset))
return relocs
def symbols(self):
return []
class Syntax:
""" Defines a syntax for an instruction or part of an instruction.
Arguments:
syntax: a list of syntax elements.
new_func: When using this syntax to create
the instruction, instead of the default constructor, this function
is called.
set_props: The set_props property can be used to set additional
properties after creating the instruction.
"""
GLYPHS = [
"@",
"&",
"#",
"=",
",",
".",
":",
"(",
")",
"[",
"]",
"{",
"}",
"+",
"-",
"*",
"%",
]
def __init__(self, syntax, priority=0):
assert isinstance(syntax, (list, tuple))
for element in syntax:
if isinstance(element, str):
if element.isidentifier():
if not element.islower():
raise TypeError(
'element "{}" must be lower case'.format(element)
)
elif element.isspace():
pass
elif element in self.GLYPHS:
pass
else: # pragma: no cover
raise TypeError('Invalid element "{}"'.format(element))
elif isinstance(element, Operand):
pass
else: # pragma: no cover
raise TypeError("Element must be string or parameter")
self.syntax = syntax
self.priority = priority
# Pre-calculate format arguments:
formal_args = []
for element in self.syntax:
if isinstance(element, Operand):
formal_args.append(element)
self.formal_arguments = formal_args
def __add__(self, other):
assert isinstance(other, Syntax)
assert self.priority == 0
assert other.priority == 0
syntax = self.syntax + other.syntax
return Syntax(syntax)
def __repr__(self):
return "{}".format(self.syntax)
def get_args(self):
""" Return all non-whitespace elements """
for element in self.syntax:
if isinstance(element, str) and element.isspace():
continue
yield element
def render(self, obj):
""" Return this syntax formatted for the given object. """
return "".join(self._get_repr(e, obj) for e in self.syntax)
@staticmethod
def _get_repr(syntax_element, obj):
""" Get the repr of a syntax part. Can be str or prop class,
in refering to an element in the args list """
if isinstance(syntax_element, str):
return syntax_element
elif isinstance(syntax_element, Operand):
return str(syntax_element.__get__(obj))
else: # pragma: no cover
raise NotImplementedError(str(syntax_element))
class BitPattern:
""" Base bit pattern class. A bit mapping is a mapping of a field
to a value of some kind.
"""
def __init__(self, field):
self.field = field
def get_value(self, objref): # pragma: no cover
raise NotImplementedError("Implement this for your pattern")
def set_value(self, value): # pragma: no cover
raise NotImplementedError("Implement this for your pattern")
class FixedPattern(BitPattern):
""" Bind a field to a fixed value """
def __init__(self, field, value):
super().__init__(field)
self.value = value
def get_value(self, objref):
return self.value
class VariablePattern(BitPattern):
def __init__(self, field, prop):
super().__init__(field)
self.prop = prop
def get_value(self, objref):
return self.prop.get_value(objref)
class Relocation:
""" Baseclass for all relocation types.
Subclass this class to create custom relocations.
Subclasses should add the following attributes:
- name: the name used to refer to the relocation type.
- number: the number that can be used to uniquely identify the relocation.
- apply: a function that can be used to apply the relocation.
- calc: a function that calculates the value for the relocation.
"""
name = None
number = None
token = None
field = None
def __init__(self, symbol_name, offset=0, addend=0):
self.symbol_name = symbol_name
self.addend = addend
self.offset = offset
def __repr__(self):
return "Reloc[{} offset={}]".format(self.name, self.offset)
def __eq__(self, other):
return (
(self.symbol_name == other.symbol_name)
and (self.offset == other.offset)
and (type(self) == type(other))
and (self.addend == other.addend)
)
def shifted(self, offset):
""" Create a shifted copy of this relocation """
return type(self)(
self.symbol_name, offset=self.offset + offset, addend=self.addend
)
@classmethod
def size(cls):
""" Calculate the amount of bytes this relocation requires """
assert cls.token.Info.size % 8 == 0
return cls.token.Info.size // 8
def apply(self, sym_value, data, reloc_value):
""" Apply this relocation type given some parameters.
This is the default implementation which stores the outcome of
the calculate function into the proper token. """
assert self.token is not None
token = self.token.from_data(data)
assert self.field is not None
assert hasattr(token, self.field)
setattr(token, self.field, self.calc(sym_value, reloc_value))
data = token.encode()
return data
def can_shrink(self, sym_value, reloc_value):
""" Test if this relocation can shrink during the relaxation phase.
        Override this method to enable linker relaxation for the relocation
        subtype.
"""
return False
def calc(self, sym_value, reloc_value): # pragma: no cover
""" Calculate the relocation """
raise NotImplementedError()
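# A minimal sketch (not part of this module) showing how Operand, Syntax and
# Constructor fit together. The "Nop" constructor and its integer operand are
# assumptions made purely for illustration.
if __name__ == "__main__":
    class Nop(Constructor):
        imm = Operand("imm", int)
        syntax = Syntax(["nop", " ", imm])
    nop = Nop(42)
    print(nop)       # rendered via Syntax.render -> "nop 42"
    print(nop.imm)   # the Operand descriptor returns the stored value -> 42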
|
|
#
# Albow - Fields
#
#-# Modified by D.C.-G. for translation purpose
import locale
from pygame import draw
import pygame
from pygame import key
from pygame.locals import K_LEFT, K_RIGHT, K_TAB, K_c, K_v, K_x, SCRAP_TEXT, K_UP, K_DOWN, K_RALT, K_LALT, \
K_BACKSPACE, K_DELETE, KMOD_SHIFT, KMOD_CTRL, KMOD_ALT, KMOD_META, K_HOME, K_END, K_z, K_y
from widget import Widget, overridable_property
from controls import Control
#-# This needs to be changed. We need albow.translate in the config module.
#-# The solution can be a set of functions which let us define the needed MCEdit 'config' data
#-# without importing it.
#-# It can be a 'config' module built only for albow.
from config import config
#-#
#-#
from translate import _
import pyperclip
#-#
#---------------------------------------------------------------------------
class TextEditor(Widget):
upper = False
tab_stop = True
_text = u""
def __init__(self, width, upper=None, **kwds):
kwds['doNotTranslate'] = kwds.get('doNotTranslate', True)
Widget.__init__(self, **kwds)
self.set_size_for_text(width)
if upper is not None:
self.upper = upper
self.insertion_point = None
self.selection_end = None
self.selection_start = None
self.undoList = []
self.undoNum = 0
self.redoList = []
self.root = self.get_root()
def get_text(self):
return self._text
def set_text(self, text):
self._text = _(text, doNotTranslate=self.doNotTranslate)
text = overridable_property('text')
def draw(self, surface):
frame = self.get_margin_rect()
fg = self.fg_color
font = self.font
focused = self.has_focus()
text, i = self.get_text_and_insertion_point()
if focused and i is None:
if self.selection_start is None or self.selection_end is None:
surface.fill(self.sel_color, frame)
else:
startStep = self.selection_start
endStep = self.selection_end
if startStep > endStep:
x1, h = font.size(text[0:endStep])[0], font.get_linesize()
x2, h = font.size(text[0:startStep])[0], font.get_linesize()
x1 += frame.left
x2 += frame.left
y = frame.top
selRect = pygame.Rect(x1, y, (x2 - x1), h)
else:
x1, h = font.size(text[0:startStep])[0], font.get_linesize()
x2, h = font.size(text[0:endStep])[0], font.get_linesize()
x1 += frame.left
x2 += frame.left
y = frame.top
selRect = pygame.Rect(x1, y, (x2 - x1), h)
draw.rect(surface, self.sel_color, selRect)
image = font.render(text, True, fg)
surface.blit(image, frame)
if focused and i is not None:
x, h = font.size(text[:i]) #[0], font.get_linesize()
x += frame.left
y = frame.top
draw.line(surface, fg, (x, y), (x, y + h - 1))
def key_down(self, event):
self.root.notMove = True
if not event.cmd or (event.alt and event.unicode):
k = event.key
if k == K_LEFT:
if not (key.get_mods() & KMOD_SHIFT):
self.move_insertion_point(-1)
else:
if self.selection_end is None and self.selection_start is None and self.insertion_point is None:
return
if self.selection_end is None and self.insertion_point != 0:
self.selection_start = self.insertion_point
self.selection_end = self.insertion_point - 1
self.insertion_point = None
elif self.selection_end is not None and self.selection_end != 0:
self.selection_end -= 1
if self.selection_end == self.selection_start:
self.insertion_point = self.selection_end
self.selection_end = None
self.selection_start = None
return
if k == K_RIGHT:
if not (key.get_mods() & KMOD_SHIFT):
self.move_insertion_point(1)
else:
if self.selection_end is None and self.selection_start is None and self.insertion_point is None:
return
if self.selection_start is None and self.insertion_point < len(self.text):
self.selection_start = self.insertion_point
self.selection_end = self.insertion_point + 1
self.insertion_point = None
elif self.selection_start is not None and self.selection_end < len(self.text):
self.selection_end += 1
if self.selection_end == self.selection_start:
self.insertion_point = self.selection_end
self.selection_end = None
self.selection_start = None
return
if k == K_TAB:
self.attention_lost()
self.tab_to_next()
return
if k == K_HOME:
if not (key.get_mods() & KMOD_SHIFT):
self.selection_start = None
self.selection_end = None
self.insertion_point = 0
elif self.insertion_point != 0:
if self.insertion_point is not None:
self.selection_start = self.insertion_point
self.insertion_point = None
self.selection_end = 0
if self.selection_end == self.selection_start:
self.insertion_point = self.selection_end
self.selection_end = None
self.selection_start = None
return
if k == K_END:
if not (key.get_mods() & KMOD_SHIFT):
self.selection_start = None
self.selection_end = None
self.insertion_point = len(self.text)
elif self.insertion_point != len(self.text):
if self.insertion_point is not None:
self.selection_start = self.insertion_point
self.insertion_point = None
self.selection_end = len(self.text)
if self.selection_end == self.selection_start:
self.insertion_point = self.selection_end
self.selection_end = None
self.selection_start = None
return
try:
c = event.unicode
except ValueError:
print 'value error'
c = ""
if self.insert_char(c, k) != 'pass':
return
if event.cmd and event.unicode:
if event.key == K_c or event.key == K_x:
try:
#pygame.scrap.put(SCRAP_TEXT, self.text)
text, i = self.get_text_and_insertion_point()
if i is None and (self.selection_start is None or self.selection_end is None):
text = self.text
elif i is None and self.selection_start is not None and self.selection_end is not None:
text = text[(min(self.selection_start, self.selection_end)):max(self.selection_start, self.selection_end)]
else:
return
pyperclip.copy(text)
except:
print "scrap not available"
finally:
if event.key == K_x and i is None:
self.insert_char(event.unicode, K_BACKSPACE)
elif event.key == K_v:
try:
self.addUndo()
#t = pygame.scrap.get(SCRAP_TEXT).replace('\0', '')
t = pyperclip.paste().replace("\n", " ")
if t is not None:
allow = True
for char in t:
if not self.allow_char(char):
allow = False
if not allow:
return
if self.insertion_point is not None:
self.text = self.text[:self.insertion_point] + t + self.text[self.insertion_point:]
self.insertion_point += len(t)
elif self.insertion_point is None and (
self.selection_start is None or self.selection_end is None):
self.text = t
self.insertion_point = len(t)
elif self.insertion_point is None and self.selection_start is not None and self.selection_end is not None:
self.text = self.text[:(min(self.selection_start, self.selection_end))] + t + self.text[(
max(self.selection_start, self.selection_end)):]
self.selection_start = None
self.selection_end = None
else:
return
self.change_text(self.text)
except:
print "scrap not available"
#print repr(t)
elif event.key == K_z and self.undoNum > 0:
self.redoList.append(self.text)
self.undoNum -= 1
self.change_text(self.undoList[self.undoNum])
self.insertion_point = len(self.text)
self.selection_start = None
self.selection_end = None
elif event.key == K_y and len(self.undoList) > self.undoNum:
self.undoNum += 1
self.change_text(self.redoList[-1])
self.redoList = self.redoList[:-1]
self.insertion_point = len(self.text)
self.selection_start = None
self.selection_end = None
else:
self.attention_lost()
def key_up(self, event):
pass
def get_text_and_insertion_point(self):
text = self.get_text()
i = self.insertion_point
if i is not None:
i = max(0, min(i, len(text)))
return text, i
def move_insertion_point(self, d):
self.selection_start = None
self.selection_end = None
text, i = self.get_text_and_insertion_point()
if i is None:
if d > 0:
i = len(text)
else:
i = 0
else:
i = max(0, min(i + d, len(text)))
self.insertion_point = i
def insert_char(self, c, k=None):
self.addUndo()
if self.upper:
c = c.upper()
if k == K_BACKSPACE or k == K_DELETE:
text, i = self.get_text_and_insertion_point()
if i is None:
text = ""
i = 0
else:
if k == K_BACKSPACE:
text = text[:i - 1] + text[i:]
i -= 1
else:
text = text[:i] + text[i + 1:]
self.change_text(text)
self.insertion_point = i
return
elif c == "\r" or c == "\x03":
return self.call_handler('enter_action')
elif c == "\x1b":
return self.call_handler('escape_action')
elif c >= "\x20":
if self.allow_char(c):
text, i = self.get_text_and_insertion_point()
if i is None:
text = c
i = 1
else:
text = text[:i] + c + text[i:]
i += 1
self.change_text(text)
self.insertion_point = i
return
return 'pass'
def addUndo(self):
if len(self.undoList) > self.undoNum:
self.undoList = self.undoList[:self.undoNum]
self.undoList.append(self.text)
self.undoNum += 1
self.redoList = []
def allow_char(self, c):
return True
def mouse_down(self, e):
self.root.notMove = True
self.focus()
self.selection_start = None
self.selection_end = None
if e.num_clicks == 2:
self.insertion_point = None
return
x, y = e.local
i = self.pos_to_index(x)
self.insertion_point = i
def pos_to_index(self, x):
text = self.get_text()
font = self.font
def width(i):
return font.size(text[:i])[0]
i1 = 0
i2 = len(text)
x1 = 0
x2 = width(i2)
while i2 - i1 > 1:
i3 = (i1 + i2) // 2
x3 = width(i3)
if x > x3:
i1, x1 = i3, x3
else:
i2, x2 = i3, x3
if x - x1 > (x2 - x1) // 2:
i = i2
else:
i = i1
return i
def change_text(self, text):
self.set_text(text)
self.call_handler('change_action')
#---------------------------------------------------------------------------
class Field(Control, TextEditor):
# type func(string) -> value
# editing boolean
empty = NotImplemented
format = u"%s"
min = None
max = None
enter_passes = False
def __init__(self, width=None, **kwds):
min = self.predict_attr(kwds, 'min')
max = self.predict_attr(kwds, 'max')
if 'format' in kwds:
self.format = kwds.pop('format')
if 'empty' in kwds:
self.empty = kwds.pop('empty')
self.editing = False
if width is None:
w1 = w2 = ""
if min is not None:
w1 = self.format_value(min)
if max is not None:
w2 = self.format_value(max)
if w2:
if len(w1) > len(w2):
width = w1
else:
width = w2
if width is None:
width = 100
TextEditor.__init__(self, width, **kwds)
def format_value(self, x):
if x == self.empty:
return ""
else:
return self.format % x
def get_text(self):
if self.editing:
return self._text
else:
return self.format_value(self.value)
def set_text(self, text):
self.editing = True
self._text = _(text, doNotTranslate=self.doNotTranslate)
if self.should_commit_immediately(text):
self.commit()
def should_commit_immediately(self, text):
return False
def enter_action(self):
if self.editing:
self.commit()
elif self.enter_passes:
return 'pass'
def escape_action(self):
if self.editing:
self.editing = False
self.insertion_point = None
else:
return 'pass'
def attention_lost(self):
self.commit(notify=True)
def clamp_value(self, value):
if self.max is not None:
value = min(value, self.max)
if self.min is not None:
value = max(value, self.min)
return value
def commit(self, notify=False):
if self.editing:
text = self._text
if text:
try:
value = self.type(text)
except ValueError:
return
value = self.clamp_value(value)
else:
value = self.empty
if value is NotImplemented:
return
self.value = value
self.insertion_point = None
if notify:
self.change_text(unicode(value))
else:
self._text = unicode(value)
self.editing = False
else:
self.insertion_point = None
# def get_value(self):
# self.commit()
# return Control.get_value(self)
#
# def set_value(self, x):
# Control.set_value(self, x)
# self.editing = False
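# A minimal sketch (not part of Albow) of a custom Field subclass, included
# purely to illustrate the Field interface above (type, format_value,
# allow_char and empty). The "HexField" name is an assumption for illustration.
class HexField(Field):
    allowed_chars = '0123456789abcdefABCDEF'
    @staticmethod
    def type(text):
        return int(text, 16)
    def format_value(self, x):
        if x == self.empty:
            return ""
        return "%x" % x
    def allow_char(self, c):
        return c in self.allowed_chars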
#---------------------------------------------------------------------------
class TextField(Field):
type = unicode
_value = u""
def should_commit_immediately(self, text):
return True
class IntField(Field):
tooltipText = _("Point here and use mousewheel to adjust")
@staticmethod
def type(i):
try:
return eval(i)
except:
try:
return int(i)
except:
return 0
_shift_increment = 16
_increment = 1
@property
def increment(self):
fastIncrementModifier = config.keys.fastIncrementModifier.get()
if (fastIncrementModifier == "Shift" and key.get_mods() & KMOD_SHIFT) or (fastIncrementModifier == "Ctrl" and (key.get_mods() & KMOD_CTRL) or (key.get_mods() & KMOD_META)) or (fastIncrementModifier == "Alt" and key.get_mods() & KMOD_ALT):
return self._shift_increment
return self._increment
@increment.setter
def increment(self, val):
self._increment = val
def decrease_value(self):
self.value = self.clamp_value(self.value - self.increment)
def increase_value(self):
self.value = self.clamp_value(self.value + self.increment)
def mouse_down(self, evt):
if evt.button == 5:
self.decrease_value()
self.change_text(str(self.value))
elif evt.button == 4:
self.increase_value()
self.change_text(str(self.value))
else:
Field.mouse_down(self, evt)
allowed_chars = '-+*/<>()0123456789'
def allow_char(self, c):
return c in self.allowed_chars
def should_commit_immediately(self, text):
while len(text) > 1 and text[0] == '0':
text = text[1:]
self._text = text
try:
return str(eval(text)) == text
except:
return False
class TimeField(Field):
allowed_chars = ':0123456789 APMapm'
def format_value(self, hm):
format = "%02d:%02d"
h, m = hm
if h >= 12:
h -= 12
return format % (h or 12, m) + " PM"
else:
return format % (h or 12, m) + " AM"
def allow_char(self, c):
return c in self.allowed_chars
@staticmethod
def type(i):
h, m = 0, 0
i = i.upper()
pm = "PM" in i
for a in "APM":
i = i.replace(a, "")
parts = i.split(":")
if len(parts):
h = int(parts[0])
if len(parts) > 1:
m = int(parts[1])
if pm and h < 12:
h += 12
h %= 24
m %= 60
return h, m
def mouse_down(self, evt):
if evt.button == 5:
delta = -1
elif evt.button == 4:
delta = 1
else:
return Field.mouse_down(self, evt)
(h, m) = self.value
pos = self.pos_to_index(evt.local[0])
if pos < 3:
h += delta
elif pos < 6:
m += delta
else:
h = (h + 12) % 24
self.value = (h, m)
def set_value(self, v):
h, m = v
super(TimeField, self).set_value((h % 24, m % 60))
class FloatField(Field):
type = float
_increment = 0.1
_shift_increment = 16.0
tooltipText = _("Point here and use mousewheel to adjust")
allowed_chars = '-+.0123456789'
def allow_char(self, c):
return c in self.allowed_chars
@property
def increment(self):
fastIncrementModifier = config.keys.fastIncrementModifier.get()
if (fastIncrementModifier == "Shift" and key.get_mods() & KMOD_SHIFT) or (fastIncrementModifier == "Ctrl" and (key.get_mods() & KMOD_CTRL) or (key.get_mods() & KMOD_META)) or (fastIncrementModifier == "Alt" and key.get_mods() & KMOD_ALT):
return self._shift_increment
return self._increment
@increment.setter
def increment(self, val):
self._increment = self.clamp_value(val)
def decrease_value(self):
self.value = self.clamp_value(self.value - self.increment)
def increase_value(self):
self.value = self.clamp_value(self.value + self.increment)
def mouse_down(self, evt):
if evt.button == 5:
self.decrease_value()
self.change_text(str(self.value))
elif evt.button == 4:
self.increase_value()
self.change_text(str(self.value))
else:
Field.mouse_down(self, evt)
#---------------------------------------------------------------------------
class TextEditorWrapped(Widget):
upper = False
tab_stop = True
_text = u""
def __init__(self, width, lines, upper=None, allowed_chars=None, **kwds):
kwds['doNotTranslate'] = kwds.get('doNotTranslate', True)
Widget.__init__(self, **kwds)
self.set_size_for_text(width, lines)
if upper is not None:
self.upper = upper
self.insertion_point = None
self.insertion_step = None
self.insertion_line = None
self.selection_start = None
self.selection_end = None
self.topLine = 0
self.dispLines = lines
self.textChanged = True
self.allowed_chars = allowed_chars
self.undoList = []
self.undoNum = 0
self.redoList = []
self.root = self.get_root()
def get_text(self):
return self._text
def set_text(self, text):
self._text = _(text, doNotTranslate=self.doNotTranslate)
self.textChanged = True
text = overridable_property('text')
#Text line list and text line EoL index reference
textL = []
textRefList = []
def draw(self, surface):
frame = self.get_margin_rect()
fg = self.fg_color
font = self.font
linesize = font.get_linesize()
focused = self.has_focus()
text, i, il = self.get_text_and_insertion_data()
ip = self.insertion_point
self.doFix = True
self.updateTextWrap()
#Scroll the text up or down if necessary
if self.insertion_line > self.topLine + self.dispLines - 1:
if ip == len(text):
self.scroll_down_all()
else:
self.scroll_down()
elif self.insertion_line < self.topLine:
if ip == 0:
self.scroll_up_all()
else:
self.scroll_up()
#Draw Border
draw.rect(surface, self.sel_color, pygame.Rect(frame.left, frame.top, frame.size[0], frame.size[1]), 1)
#Draw Selection Highlighting if Applicable
if focused and ip is None:
if self.selection_start is None or self.selection_end is None:
surface.fill(self.sel_color, frame)
else:
startLine, startStep = self.get_char_position(self.selection_start)
endLine, endStep = self.get_char_position(self.selection_end)
rects = []
if startLine == endLine:
if startStep > endStep:
x1, h = font.size(self.textL[startLine][0:endStep])[0], font.get_linesize()
x2, h = font.size(self.textL[startLine][0:startStep])[0], font.get_linesize()
x1 += frame.left
x2 += frame.left
lineOffset = startLine - self.topLine
y = frame.top + lineOffset * h
if lineOffset >= 0:
selRect = pygame.Rect(x1, y, (x2 - x1), h)
else:
x1, h = font.size(self.textL[startLine][0:startStep])[0], font.get_linesize()
x2, h = font.size(self.textL[startLine][0:endStep])[0], font.get_linesize()
x1 += frame.left
x2 += frame.left
lineOffset = startLine - self.topLine
y = frame.top + lineOffset * h
if lineOffset >= 0:
selRect = pygame.Rect(x1, y, (x2 - x1), h)
draw.rect(surface, self.sel_color, selRect)
elif startLine < endLine:
x1, h = font.size(self.textL[startLine][0:startStep])[0], font.get_linesize()
x2, h = font.size(self.textL[endLine][0:endStep])[0], font.get_linesize()
x1 += frame.left
x2 += frame.left
lineOffsetS = startLine - self.topLine
lineOffsetE = endLine - self.topLine
lDiff = lineOffsetE - lineOffsetS
while lDiff > 1 and 0 <= lDiff + lineOffsetS + lDiff < self.dispLines:
y = frame.top + lineOffsetS * h + (lDiff - 1) * h
rects.append(pygame.Rect(frame.left, y, frame.right - frame.left, h))
lDiff += -1
y = frame.top + lineOffsetS * h
if lineOffsetS >= 0:
rects.append(pygame.Rect(x1, y, frame.right - x1, h))
y = frame.top + lineOffsetE * h
if lineOffsetE < self.dispLines:
rects.append(pygame.Rect(frame.left, y, x2 - frame.left, h))
for selRect in rects:
draw.rect(surface, self.sel_color, selRect)
elif startLine > endLine:
x2, h = font.size(self.textL[startLine][0:startStep])[0], font.get_linesize()
x1, h = font.size(self.textL[endLine][0:endStep])[0], font.get_linesize()
x1 += frame.left
x2 += frame.left
lineOffsetE = startLine - self.topLine
lineOffsetS = endLine - self.topLine
lDiff = lineOffsetE - lineOffsetS
while lDiff > 1 and 0 <= lDiff + lineOffsetS + lDiff < self.dispLines:
y = frame.top + lineOffsetS * h + (lDiff - 1) * h
rects.append(pygame.Rect(frame.left, y, frame.right - frame.left, h))
lDiff += -1
y = frame.top + lineOffsetS * h
if lineOffsetS >= 0:
rects.append(pygame.Rect(x1, y, frame.right - x1, h))
y = frame.top + lineOffsetE * h
if lineOffsetE < self.dispLines:
rects.append(pygame.Rect(frame.left, y, x2 - frame.left, h))
for selRect in rects:
draw.rect(surface, self.sel_color, selRect)
# Draw Lines of Text
h = 0
for textLine in self.textL[self.topLine:self.topLine + self.dispLines]:
image = font.render(textLine, True, fg)
surface.blit(image, frame.move(0, h))
# h += font.size(textLine)[1]
h += linesize
# Draw Cursor if Applicable
if focused and ip is not None and i is not None and il is not None:
if self.textL:
# x, h = font.size(self.textL[il][:i])
x, h = font.size(self.textL[il][:i])[0], linesize
else:
# x, h = (0, font.size("X")[1])
x, h = (0, linesize)
if self.textRefList and ip == self.textRefList[il]:
if self.doFix:
self.move_insertion_point(-1)
self.doFix = False
x += font.size(self.textL[il][i])[0]
if not self.doFix:
self.move_insertion_point(1)
x += frame.left
y = frame.top + h * (il - self.topLine)
draw.line(surface, fg, (x, y), (x, y + h - 1))
def key_down(self, event):
self.root.notMove = True
if not event.cmd or (event.alt and event.unicode):
k = event.key
if k == K_LEFT:
if not (key.get_mods() & KMOD_SHIFT):
self.move_insertion_point(-1)
else:
if self.selection_end is None and self.selection_start is None and self.insertion_point is None:
return
if self.selection_end is None and self.insertion_point != 0:
self.selection_start = self.insertion_point
self.selection_end = self.insertion_point - 1
self.insertion_point = None
elif self.selection_end is not None and self.selection_end != 0:
self.selection_end -= 1
if self.selection_end == self.selection_start:
self.insertion_point = self.selection_end
self.selection_end = None
self.selection_start = None
return
if k == K_RIGHT:
if not (key.get_mods() & KMOD_SHIFT):
self.move_insertion_point(1)
else:
if self.selection_end is None and self.selection_start is None and self.insertion_point is None:
return
if self.selection_start is None and self.insertion_point < len(self.text):
self.selection_start = self.insertion_point
self.selection_end = self.insertion_point + 1
self.insertion_point = None
elif self.selection_start is not None and self.selection_end < len(self.text):
self.selection_end += 1
if self.selection_end == self.selection_start:
self.insertion_point = self.selection_end
self.selection_end = None
self.selection_start = None
return
if k == K_TAB:
self.attention_lost()
self.tab_to_next()
return
if k == K_DOWN:
self.move_insertion_line(1)
return
if k == K_UP:
self.move_insertion_line(-1)
return
if k == K_HOME:
if not (key.get_mods() & KMOD_SHIFT):
self.selection_start = None
self.selection_end = None
self.insertion_point = 0
self.sync_line_and_step()
elif self.insertion_point != 0:
if self.insertion_point is not None:
self.selection_start = self.insertion_point
self.insertion_point = None
self.selection_end = 0
if self.selection_end == self.selection_start:
self.insertion_point = self.selection_end
self.selection_end = None
self.selection_start = None
self.sync_line_and_step()
return
if k == K_END:
if not (key.get_mods() & KMOD_SHIFT):
self.selection_start = None
self.selection_end = None
self.insertion_point = len(self.text)
self.sync_line_and_step()
elif self.insertion_point != len(self.text):
if self.insertion_point is not None:
self.selection_start = self.insertion_point
self.insertion_point = None
self.selection_end = len(self.text)
if self.selection_end == self.selection_start:
self.insertion_point = self.selection_end
self.selection_end = None
self.selection_start = None
self.sync_line_and_step()
return
try:
c = event.unicode
except ValueError:
print 'value error'
c = ""
if self.insert_char(c, k) != 'pass':
return
if event.cmd and event.unicode:
if event.key == K_c or event.key == K_x:
try:
#pygame.scrap.put(SCRAP_TEXT, self.text)
text, i = self.get_text_and_insertion_point()
if i is None and (self.selection_start is None or self.selection_end is None):
text = self.text
elif i is None and self.selection_start is not None and self.selection_end is not None:
text = text[(min(self.selection_start, self.selection_end)):max(self.selection_start, self.selection_end)]
else:
return
pyperclip.copy(text)
except:
print "scrap not available"
finally:
if event.key == K_x and i is None:
self.insert_char(event.unicode, K_BACKSPACE)
elif event.key == K_v:
try:
self.addUndo()
#t = pygame.scrap.get(SCRAP_TEXT).replace('\0', '')
t = pyperclip.paste().replace("\n", " ")
if t is not None:
allow = True
for char in t:
if not self.allow_char(char):
allow = False
if not allow:
return
if self.insertion_point is not None:
self.text = self.text[:self.insertion_point] + t + self.text[self.insertion_point:]
self.insertion_point += len(t)
elif self.insertion_point is None and (
self.selection_start is None or self.selection_end is None):
self.text = t
self.insertion_point = len(t)
elif self.insertion_point is None and self.selection_start is not None and self.selection_end is not None:
self.text = self.text[:(min(self.selection_start, self.selection_end))] + t + self.text[(
max(self.selection_start, self.selection_end)):]
self.selection_start = None
self.selection_end = None
else:
return
self.change_text(self.text)
self.sync_line_and_step()
except:
print "scrap not available"
#print repr(t)
elif event.key == K_z and self.undoNum > 0:
self.redoList.append(self.text)
self.undoNum -= 1
self.change_text(self.undoList[self.undoNum])
self.insertion_point = len(self.text)
self.selection_start = None
self.selection_end = None
elif event.key == K_y and len(self.undoList) > self.undoNum:
self.undoNum += 1
self.change_text(self.redoList[-1])
self.redoList = self.redoList[:-1]
self.insertion_point = len(self.text)
self.selection_start = None
self.selection_end = None
else:
self.attention_lost()
def key_up(self, event):
pass
def get_text_and_insertion_point(self):
text = self.get_text()
i = self.insertion_point
if i is not None:
i = max(0, min(i, len(text)))
return text, i
def get_text_and_insertion_data(self):
text = self.get_text()
i = self.insertion_step
il = self.insertion_line
if il is not None:
il = max(0, min(il, (len(self.textL) - 1)))
if i is not None and il is not None and len(self.textL) > 0:
i = max(0, min(i, len(self.textL[il]) - 1))
return text, i, il
def move_insertion_point(self, d):
self.selection_end = None
self.selection_start = None
text, i = self.get_text_and_insertion_point()
if i is None:
if d > 0:
i = len(text)
else:
i = 0
else:
i = max(0, min(i + d, len(text)))
self.insertion_point = i
self.sync_line_and_step()
def sync_line_and_step(self):
self.updateTextWrap()
self.sync_insertion_line()
self.sync_insertion_step()
def sync_insertion_line(self):
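        # Descriptive note (added): textRefList holds one boundary offset per wrapped
        # line break, so the caret's line index is the number of boundaries that lie
        # strictly before the insertion point.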
ip = self.insertion_point
i = 0
for refVal in self.textRefList:
if ip > refVal:
i += 1
elif ip <= refVal:
break
self.insertion_line = i
def sync_insertion_step(self):
ip = self.insertion_point
il = self.insertion_line
if ip is None:
self.move_insertion_point(0)
ip = self.insertion_point
if il is None:
self.move_insertion_line(0)
il = self.insertion_line
if il > 0:
refPoint = self.textRefList[il - 1]
else:
refPoint = 0
self.insertion_step = ip - refPoint
def get_char_position(self, i):
j = 0
for refVal in self.textRefList:
if i > refVal:
j += 1
elif i <= refVal:
break
line = j
if line > 0:
refPoint = self.textRefList[line - 1]
else:
refPoint = 0
step = i - refPoint
return line, step
def move_insertion_line(self, d):
text, i, il = self.get_text_and_insertion_data()
if self.selection_end is not None:
endLine, endStep = self.get_char_position(self.selection_end)
il = endLine
i = endStep
self.insertion_step = i
self.selection_end = None
self.selection_start = None
if il is None:
if d > 0:
if len(self.textL) > 1:
self.insertion_line = d
else:
self.insertion_line = 0
else:
self.insertion_line = 0
if i is None:
self.insertion_step = 0
        elif 0 <= il + d < len(self.textL):
self.insertion_line = il + d
if self.insertion_line > 0:
self.insertion_point = self.textRefList[self.insertion_line - 1] + self.insertion_step
if self.insertion_point > len(self.text):
self.insertion_point = len(self.text)
else:
if self.insertion_step is not None:
self.insertion_point = self.insertion_step
else:
self.insertion_point = 0
self.insertion_step = 0
def insert_char(self, c, k=None):
self.addUndo()
if self.upper:
c = c.upper()
if k == K_BACKSPACE or k == K_DELETE:
text, i = self.get_text_and_insertion_point()
if i is None and (self.selection_start is None or self.selection_end is None):
text = ""
i = 0
self.insertion_line = i
self.insertion_step = i
elif i is None and self.selection_start is not None and self.selection_end is not None:
i = min(self.selection_start, self.selection_end)
text = text[:(min(self.selection_start, self.selection_end))] + text[(
max(self.selection_start, self.selection_end)):]
self.selection_start = None
self.selection_end = None
elif i > 0:
if k == K_BACKSPACE:
text = text[:i - 1] + text[i:]
i -= 1
else:
text = text[:i] + text[i + 1:]
self.change_text(text)
self.insertion_point = i
self.sync_line_and_step()
return
elif c == "\r" or c == "\x03":
return self.call_handler('enter_action')
elif c == "\x1b":
return self.call_handler('escape_action')
elif c >= "\x20":
if self.allow_char(c):
text, i = self.get_text_and_insertion_point()
if i is None and (self.selection_start is None or self.selection_end is None):
text = c
i = 1
elif i is None and self.selection_start is not None and self.selection_end is not None:
i = min(self.selection_start, self.selection_end) + 1
text = text[:(min(self.selection_start, self.selection_end))] + c + text[(
max(self.selection_start, self.selection_end)):]
self.selection_start = None
self.selection_end = None
else:
text = text[:i] + c + text[i:]
i += 1
self.change_text(text)
self.insertion_point = i
self.sync_line_and_step()
return
return 'pass'
def addUndo(self):
if len(self.undoList) > self.undoNum:
self.undoList = self.undoList[:self.undoNum]
self.undoList.append(self.text)
self.undoNum += 1
self.redoList = []
def allow_char(self, c):
if not self.allowed_chars:
return True
return c in self.allowed_chars
def mouse_down(self, e):
self.root.notMove = True
self.focus()
if e.button == 1:
if e.num_clicks == 2:
self.insertion_point = None
self.selection_start = None
self.selection_end = None
return
x, y = e.local
i = self.pos_to_index(x, y)
self.insertion_point = i
self.selection_start = None
self.selection_end = None
self.sync_line_and_step()
if e.button == 5:
# self.scroll_down()
self.move_insertion_line(1)
if e.button == 4:
# self.scroll_up()
self.move_insertion_line(-1)
def mouse_drag(self, e):
x, y = e.local
i = self.pos_to_index(x, y)
if self.insertion_point is not None:
if i != self.insertion_point:
if self.selection_start is None:
self.selection_start = self.insertion_point
self.selection_end = i
self.insertion_point = None
else:
if self.selection_start is None:
self.selection_start = i
else:
if self.selection_start == i:
self.selection_start = None
self.selection_end = None
self.insertion_point = i
else:
self.selection_end = i
def pos_to_index(self, x, y):
textL = self.textL
textRef = self.textRefList
topLine = self.topLine
dispLines = self.dispLines
font = self.font
if textL:
# h = font.size("X")[1]
h = font.get_linesize()
line = y // h
if line >= dispLines:
line = dispLines - 1
line += topLine
if line >= len(textL):
line = len(textL) - 1
if line < 0:
line = 0
def width(i):
return font.size(textL[line][:i])[0]
i1 = 0
i2 = len(textL[line])
x1 = 0
x2 = width(i2)
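            # Binary search on rendered pixel width for the character boundary
            # closest to the click position x.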
while i2 - i1 > 1:
i3 = (i1 + i2) // 2
x3 = width(i3)
if x > x3:
i1, x1 = i3, x3
else:
i2, x2 = i3, x3
if x - x1 > (x2 - x1) // 2:
i = i2
else:
i = i1
if line > 0:
i = i + textRef[line - 1]
else:
i = 0
return i
def change_text(self, text):
self.set_text(_(text, doNotTranslate=self.doNotTranslate))
self.textChanged = True
self.updateTextWrap()
self.call_handler('change_action')
def scroll_up(self):
if self.topLine - 1 >= 0:
self.topLine -= 1
def scroll_down(self):
if self.topLine + 1 < len(self.textL) - self.dispLines + 1:
self.topLine += 1
def scroll_up_all(self):
if self.topLine - 1 >= 0:
self.topLine = 0
def scroll_down_all(self):
if self.topLine + 1 < len(self.textL) - self.dispLines + 1:
self.topLine = len(self.textL) - self.dispLines
def updateTextWrap(self):
# Update text wrapping for box
font = self.font
frame = self.get_margin_rect()
frameW, frameH = frame.size
if self.textChanged:
ix = 0
iz = 0
textLi = 0
text = self.text
textL = []
textR = []
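            # Greedy wrap: grow the current segment one character at a time and start
            # a new wrapped line when its rendered width exceeds the frame (or an
            # explicit newline is hit); textR records the flat-string boundary offsets
            # that sync_insertion_line / sync_insertion_step rely on.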
while ix < len(text):
ix += 1
                # The loop index is an int and can never equal a string; check the
                # next character instead and treat an explicit newline as a break.
                if ix < len(text) and text[ix] in ('\r', '\x03', '\n'):
if len(textL) > textLi:
textL[textLi] = text[iz:ix]
textR[textLi] = ix
else:
textL.append(text[iz:ix])
textR.append(ix)
iz = ix + 1
textLi += 1
segW = font.size(text[iz:ix])[0]
if segW > frameW:
if len(textL) > textLi:
textL[textLi] = text[iz:ix - 1]
textR[textLi] = ix - 1
else:
textL.append(text[iz:ix - 1])
textR.append(ix - 1)
iz = ix - 1
textLi += 1
if iz < ix:
if len(textL) > textLi:
textL[textLi] = text[iz:ix]
textR[textLi] = ix
else:
textL.append(text[iz:ix])
textR.append(ix)
iz = ix
textLi += 1
textL = textL[:textLi]
textR = textR[:textLi]
self.textL = textL
self.textRefList = textR
self.textChanged = False
i = 0
#---------------------------------------------------------------------------
class FieldWrapped(Control, TextEditorWrapped):
# type func(string) -> value
# editing boolean
empty = ""
format = u"%s"
min = None
max = None
enter_passes = False
def __init__(self, width=None, lines=1, allowed_chars=None, **kwds):
min = self.predict_attr(kwds, 'min')
max = self.predict_attr(kwds, 'max')
if 'format' in kwds:
self.format = kwds.pop('format')
if 'empty' in kwds:
self.empty = kwds.pop('empty')
self.editing = False
if width is None:
w1 = w2 = ""
if min is not None:
w1 = self.format_value(min)
if max is not None:
w2 = self.format_value(max)
if w2:
if len(w1) > len(w2):
width = w1
else:
width = w2
if width is None:
width = 100
if lines is None:
lines = 1
TextEditorWrapped.__init__(self, width, lines, allowed_chars=allowed_chars, **kwds)
def format_value(self, x):
if x == self.empty:
return ""
else:
return self.format % _(x, doNotTranslate=self.doNotTranslate)
def get_text(self):
if self.editing:
return self._text
else:
return self.format_value(self.value)
def set_text(self, text):
self.editing = True
self._text = _(text, doNotTranslate=self.doNotTranslate)
if self.should_commit_immediately(text):
self.commit()
@staticmethod
def should_commit_immediately(text):
return False
def enter_action(self):
if self.editing:
self.commit()
elif self.enter_passes:
return 'pass'
def escape_action(self):
if self.editing:
self.editing = False
self.insertion_point = None
else:
return 'pass'
def attention_lost(self):
self.commit(notify=True)
def clamp_value(self, value):
if self.max is not None:
value = min(value, self.max)
if self.min is not None:
value = max(value, self.min)
return value
def commit(self, notify=False):
if self.editing:
text = self._text
if text:
try:
value = self.type(text)
except ValueError:
return
value = self.clamp_value(value)
else:
value = self.empty
if value is NotImplemented:
return
self.value = value
self.insertion_point = None
if notify:
self.change_text(unicode(value))
else:
self._text = unicode(value)
self.editing = False
else:
self.insertion_point = None
# def get_value(self):
# self.commit()
# return Control.get_value(self)
#
# def set_value(self, x):
# Control.set_value(self, x)
# self.editing = False
#---------------------------------------------------------------------------
class TextFieldWrapped(FieldWrapped):
type = unicode
_value = u""
|
|
""" Data wrapper class
Manages loading data and prepping batches. Dynamically stores and loads chunks of data to disk to support datasets
larger than the available RAM. Uses numpy for saving and loading data, as it is much faster than pickle and allows
compression.
It can be instantiated with 4 different shuffle_* flags, each responsible for random behavior. If a deterministic
order of data is required in an already existing object, it is enough to temporarily set shuffle_batch_on_return and
shuffle_in_chunk_on_chunk_reload flags to False before iterating.
Parameters:
data_path - str or list - path to file(s) containing data
train_ids - int or list - ids specifying datasets used for training, may be one or multiple files
valid_id - int - id specifying dataset used for validation, has to be one file only
test_id - int or None - id specifying dataset used for testing, may be one file or none
eval_ids - int or list or None - ids specifying datasets that are used for neither of the above purposes, but just
evaluation
create_chunks - bool - if False, does not load the data from the files specified, but from temporary chunks. If
chunks do not exist the program fails. Use it to speed up loading huge datasets.
chunk_size - int - number of tweets in one chunk
batch_size - int - number of tweets in one training batch
shuffle_chunks_on_load - bool - if True, shuffles the chunks while loading data from files
shuffle_in_chunks_on_load - bool - if True, shuffles tweets inside chunks while loading data from files
shuffle_batch_on_return - bool - if True, shuffles tweets inside batch while iterating on dataset
shuffle_in_chunk_on_chunk_reload - bool - if True, shuffles tweets inside the chunk whenever chunk is loaded
rng_seed - int or None - random number generator seed
temp_dir - str - path to the directory to store the chunks in
"""
from __future__ import print_function
import cPickle as pickle
import numpy as np
import os
__all__ = [
"DataWrapper"
]
class DataWrapper:
def __init__(self,
data_path,
train_ids,
valid_id,
test_id=None,
eval_ids=None,
create_chunks=True,
chunk_size=10000,
batch_size=200,
shuffle_chunks_on_load=True,
shuffle_in_chunks_on_load=True,
shuffle_batch_on_return=True,
shuffle_in_chunk_on_chunk_reload=True,
rng_seed=None,
temp_dir='temp_chunks',
print_progress=True):
self.data_path = data_path
if isinstance(self.data_path, basestring):
self.data_path = [self.data_path]
self.dataset_names = []
for path in self.data_path:
self.dataset_names.append(os.path.basename(path))
self.temp_dir = temp_dir
self.chunk_size = chunk_size // batch_size * batch_size # make chunk_size a multiple of batch_size
self.batch_size = batch_size
self.shuffle_chunks_on_load = shuffle_chunks_on_load
self.shuffle_in_chunks_on_load = shuffle_in_chunks_on_load
self.shuffle_batch_on_return = shuffle_batch_on_return
self.shuffle_in_chunk_on_chunk_reload = shuffle_in_chunk_on_chunk_reload
if rng_seed is not None:
np.random.seed(rng_seed)
self.rng_seed = rng_seed
self.create_chunks = create_chunks
self.n_datasets = len(self.data_path)
self.print_progress = print_progress
if train_ids is None:
raise ValueError('Specify at least one train id.')
if isinstance(train_ids, (int, long)):
train_ids = [train_ids]
self.train_ids = train_ids
if valid_id is None:
            raise ValueError('Specify a validation id.')
self.valid_id = valid_id
self.test_id = test_id
if isinstance(eval_ids, (int, long)):
eval_ids = [eval_ids]
if eval_ids is None:
eval_ids = []
self.eval_ids = eval_ids
self.max_len = 0
self.labels = []
self.n_labels = 0
self.charset_map = {}
self.charset_size = 0
self.n_tweets = []
self.n_chunks = []
self.n_batches = []
self.x = None
self.x_mask = None
self.y = None
self.current_batch = 0
self.current_chunk = 0
self.current_data = 0
self.__load_data_params()
self.__load_data()
def __iter__(self):
return self
def next(self):
if self.current_batch < self.n_batches[self.current_data]:
batch = self.__get_batch(self.current_batch)
self.current_batch += 1
return batch
else:
self.current_batch = 0
raise StopIteration()
def set_current_data(self, no):
if 0 <= no < len(self.data_path):
self.current_data = no
self.current_batch = 0
self.current_chunk = 0
self.__load_chunk(0)
def __get_batch(self, batch_id):
if self.n_chunks[self.current_data] == 1:
current_batch_in_chunk = batch_id
else:
# Load another chunk if necessary
if not self.__is_batch_in_chunk(batch_id, self.current_chunk):
self.__load_chunk(self.__get_chunk_id_of_batch(batch_id))
current_batch_in_chunk = batch_id % (self.chunk_size / self.batch_size)
current_slice = range(current_batch_in_chunk * self.batch_size,
(current_batch_in_chunk + 1) * self.batch_size)
if self.shuffle_batch_on_return:
np.random.shuffle(current_slice)
return self.x[current_slice], self.x_mask[current_slice], self.y[current_slice]
def __is_batch_in_chunk(self, batch_id, chunk_id):
return self.chunk_size * chunk_id <= batch_id * self.batch_size < self.chunk_size * (chunk_id + 1)
def __get_chunk_id_of_batch(self, batch_id):
return batch_id * self.batch_size // self.chunk_size
def __load_data_params(self):
if self.create_chunks:
for i_path, path in enumerate(self.data_path):
with open(path, 'rb') as pfile:
tweets = pickle.load(pfile)
self.n_tweets.append(len(tweets))
for iTweet, tweet_entry in enumerate(tweets):
tweet_text = tweet_entry[1]
tweet_sentiment = tweet_entry[2]
if len(tweet_text) > self.max_len:
self.max_len = len(tweet_text)
for symbol in tweet_text:
if symbol not in self.charset_map:
self.charset_map[symbol] = self.charset_size
self.charset_size += 1
if tweet_sentiment not in self.labels:
self.labels.append(tweet_sentiment)
self.n_labels += 1
self.n_chunks.append((self.n_tweets[i_path] - 1) / self.chunk_size + 1)
self.n_batches.append((self.n_tweets[i_path] - 1) / self.batch_size + 1)
self.__save_chunk_info()
else:
self.__load_chunk_info()
def __save_chunk_info(self):
if not os.path.isdir(self.temp_dir):
os.mkdir(self.temp_dir)
with open(os.path.join(self.temp_dir, 'chunk_info.p'), 'wb') as pfile:
pickle.dump([self.max_len,
self.labels,
self.n_labels,
self.charset_map,
self.charset_size,
self.n_tweets,
self.n_chunks,
self.n_batches], pfile)
def __load_chunk_info(self):
with open(os.path.join(self.temp_dir, 'chunk_info.p'), 'rb') as pfile:
[self.max_len,
self.labels,
self.n_labels,
self.charset_map,
self.charset_size,
self.n_tweets,
self.n_chunks,
self.n_batches] = pickle.load(pfile)
def __load_data(self):
if self.create_chunks:
self.symbols_loaded = 0
for i_path, path in enumerate(self.data_path):
self.current_data = i_path
with open(path, 'rb') as pfile:
if self.print_progress:
print(self.dataset_names[i_path] + ': ', end='')
step = max(self.n_tweets[i_path] // 10 + 1, 1)
offset = step * 10 - self.n_tweets[i_path] + 1
if self.print_progress and self.n_tweets[i_path] < 10:
print('.' * (10 - self.n_tweets[i_path]), end='')
chunk_ids = range(self.n_chunks[i_path])
if self.shuffle_chunks_on_load:
# leave the last chunk at its place as it is most probably not full
last_id = chunk_ids[-1]
chunk_ids = chunk_ids[:-1]
np.random.shuffle(chunk_ids)
chunk_ids.append(last_id)
# limit the size in case there is not enough data to fill the whole chunk
if self.n_chunks[i_path] > 1:
data_size = self.chunk_size
else:
data_size = self.n_batches[i_path] * self.batch_size
tweets = pickle.load(pfile)
self.__reset_data(data_size)
chunk_id = 0
for iTweet, tweet_entry in enumerate(tweets):
if self.print_progress and not (iTweet + offset) % step:
print('.', end='')
iTweet %= self.chunk_size
tweet_text = tweet_entry[1]
tweet_sentiment = tweet_entry[2]
for iSym, symbol in enumerate(tweet_text):
self.x[iTweet, iSym] = self.charset_map[symbol]
self.x_mask[iTweet, iSym] = 1
self.symbols_loaded += 1
self.y[iTweet] = int(tweet_sentiment)
if iTweet == self.chunk_size - 1:
# chunk full - save
if self.shuffle_in_chunks_on_load:
self.__shuffle_data()
self.__save_chunk(chunk_ids[chunk_id])
if chunk_id == self.n_chunks[self.current_data] - 2:
# the last chunk may be smaller
data_size = (self.n_batches[i_path] * self.batch_size) % self.chunk_size
self.__reset_data(data_size)
chunk_id += 1
if chunk_id == self.n_chunks[self.current_data] - 1:
if self.shuffle_in_chunks_on_load:
self.__shuffle_data()
self.__save_chunk(chunk_ids[chunk_id])
if self.print_progress:
print('')
self.current_data = 0
self.__load_chunk(0)
def __encode1hot(self):
x_1hot = np.zeros((self.x.shape[0], self.x.shape[1], self.charset_size))
for iTweet, tweet in enumerate(self.x):
for iSym, symbol in enumerate(tweet):
if self.x_mask[iTweet, iSym] == 1:
x_1hot[iTweet, iSym, symbol] = 1
return x_1hot
def __reset_data(self, data_size):
self.x = np.zeros((data_size, self.max_len), dtype=np.uint32)
self.x_mask = np.zeros((data_size, self.max_len), dtype=np.uint32)
self.y = np.zeros(data_size, dtype=np.uint32)
def __shuffle_data(self):
current_slice = range(self.y.shape[0])
np.random.shuffle(current_slice)
self.x = self.x[current_slice]
self.x_mask = self.x_mask[current_slice]
self.y = self.y[current_slice]
def __save_chunk(self, chunk_id):
if not os.path.isdir(self.temp_dir):
os.mkdir(self.temp_dir)
file_path = os.path.join(self.temp_dir, 'chunk_' + str(self.current_data) + '_' + str(chunk_id) + '.npz')
with open(file_path, 'wb') as pfile:
np.savez_compressed(pfile, x=self.x, x_mask=self.x_mask, y=self.y)
def __load_chunk(self, chunk_id):
file_path = os.path.join(self.temp_dir, 'chunk_' + str(self.current_data) + '_' + str(chunk_id) + '.npz')
with np.load(file_path) as vals:
self.x = vals['x']
self.x_mask = vals['x_mask']
self.y = vals['y']
self.current_chunk = chunk_id
if self.shuffle_in_chunk_on_chunk_reload:
self.__shuffle_data()
self.x = self.__encode1hot()
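# Illustrative usage sketch only (not part of the original module). The file names
# and ids below are hypothetical; they show how the wrapper described in the module
# docstring is typically driven: pick a dataset, then iterate to obtain
# (x, x_mask, y) batches until StopIteration resets the batch counter.
def _example_data_wrapper_usage():
    wrapper = DataWrapper(data_path=['train.p', 'valid.p'],
                          train_ids=0,
                          valid_id=1,
                          chunk_size=10000,
                          batch_size=200,
                          rng_seed=1234)
    wrapper.set_current_data(0)
    for x, x_mask, y in wrapper:
        pass  # feed the one-hot encoded batch to a model here
    return wrapper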
|
|
#!/usr/bin/env python
"""
Sentry
======
Sentry is a realtime event logging and aggregation platform. It specializes
in monitoring errors and extracting all the information needed to do a proper
post-mortem without any of the hassle of the standard user feedback loop.
Sentry is a Server
------------------
The Sentry package, at its core, is just a simple server and web UI. It will
handle authentication clients (such as `Raven <https://github.com/getsentry/raven-python>`_)
and all of the logic behind storage and aggregation.
That said, Sentry is not limited to Python. The primary implementation is in
Python, but it contains a full API for sending events from any language, in
any application.
:copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import datetime
import json
import os.path
from distutils import log
from distutils.core import Command
from setuptools.command.install import install
from setuptools.command.develop import develop
from setuptools.command.sdist import sdist
from setuptools import setup, find_packages
from subprocess import check_output
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
for m in ('multiprocessing', 'billiard'):
try:
__import__(m)
except ImportError:
pass
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))
dev_requires = [
'flake8>=2.0,<2.1',
]
tests_require = [
'blist', # used by cassandra
'casscache',
'cqlsh',
'datadog',
'elasticsearch',
'httpretty',
'pytest-cov>=1.4',
'pytest-timeout',
'python-coveralls',
'responses',
'riak',
]
install_requires = [
'BeautifulSoup>=3.2.1,<3.3.0',
'celery>=3.1.8,<3.2.0',
'cssutils>=0.9.9,<0.10.0',
'Django>=1.6.0,<1.7',
'django-bitfield>=1.7.0,<1.8.0',
'django-crispy-forms>=1.4.0,<1.5.0',
'django-debug-toolbar>=1.3.2,<1.4.0',
'django-paging>=0.2.5,<0.3.0',
'django-jsonfield>=0.9.13,<0.9.14',
'django-picklefield>=0.3.0,<0.4.0',
'django-recaptcha>=1.0.4,<1.1.0',
'django-social-auth>=0.7.28,<0.8.0',
'django-sudo>=1.1.3,<1.2.0',
'django-templatetag-sugar>=0.1.0',
'djangorestframework>=2.3.8,<2.4.0',
'email-reply-parser>=0.2.0,<0.3.0',
'enum34>=0.9.18,<0.10.0',
'exam>=0.5.1',
'gunicorn>=19.2.1,<20.0.0',
'ipaddr>=2.1.11,<2.2.0',
'logan>=0.7.1,<0.8.0',
'lxml>=3.4.1',
'mock>=0.8.0',
'markdown>=2.4.1,<2.5.0',
'petname>=1.7,<1.8',
'progressbar>=2.2,<2.4',
'pytest',
'pytest-django',
'python-dateutil>=2.0.0,<3.0.0',
'python-memcached>=1.53,<2.0.0',
'raven>=5.3.0',
'redis>=2.7.0,<2.11.0',
'requests[security]>=2.7.0,<2.8.0',
'simplejson>=3.1.0,<3.4.0',
'six>=1.6.0,<2.0.0',
'setproctitle>=1.1.7,<1.2.0',
'statsd>=3.1.0,<3.2.0',
'South==1.0.1',
'toronado>=0.0.4,<0.1.0',
'ua-parser>=0.3.5',
'urllib3>=1.7.1,<1.8.0',
'rb',
]
postgres_requires = [
'psycopg2>=2.5.0,<2.6.0',
]
postgres_pypy_requires = [
'psycopg2cffi',
]
mysql_requires = [
'MySQL-python>=1.2.0,<1.3.0',
]
class DevelopWithBuildStatic(develop):
def install_for_development(self):
self.run_command('build_static')
return develop.install_for_development(self)
class SdistWithBuildStatic(sdist):
def make_release_tree(self, *a, **kw):
dist_path = self.distribution.get_fullname()
sdist.make_release_tree(self, *a, **kw)
self.reinitialize_command('build_static', work_path=dist_path)
self.run_command('build_static')
with open(os.path.join(dist_path, 'sentry-package.json'), 'w') as fp:
json.dump({
'createdAt': datetime.datetime.utcnow().isoformat() + 'Z',
}, fp)
class BuildStatic(Command):
user_options = [
('work-path=', 'w',
"The working directory for source files. Defaults to ."),
]
def initialize_options(self):
self.work_path = None
def finalize_options(self):
if self.work_path is None:
self.work_path = ROOT
def run(self):
work_path = self.work_path
log.info("initializing git submodules")
check_output(['git', 'submodule', 'init'], cwd=work_path)
check_output(['git', 'submodule', 'update'], cwd=work_path)
log.info("running [npm install --quiet]")
check_output(['npm', 'install', '--quiet'], cwd=work_path)
log.info("running [gulp dist]")
check_output([os.path.join('node_modules', '.bin', 'gulp'), 'dist:css'],
cwd=work_path)
# Enable React production optimization
os.environ['NODE_ENV'] = 'production'
log.info("running [webpack]")
check_output([os.path.join('node_modules', '.bin', 'webpack'), '-p'],
cwd=work_path)
class SmartInstall(install):
"""
Installs Sentry into the Python environment.
If the package indicator is missing, this will also force a run of
`build_static` which is required for JavaScript assets and other things.
"""
def _needs_static(self):
return not os.path.exists(os.path.join(ROOT, 'sentry-package.json'))
def run(self):
if self._needs_static():
self.run_command('build_static')
install.run(self)
setup(
name='sentry',
version='8.0.0.dev0',
author='David Cramer',
author_email='[email protected]',
url='https://www.getsentry.com',
description='A realtime logging and aggregation server.',
long_description=open('README.rst').read(),
package_dir={'': 'src'},
packages=find_packages('src'),
zip_safe=False,
install_requires=install_requires,
extras_require={
'tests': tests_require,
'dev': dev_requires,
'postgres': install_requires + postgres_requires,
'postgres_pypy': install_requires + postgres_pypy_requires,
'mysql': install_requires + mysql_requires,
},
cmdclass={
'build_static': BuildStatic,
'develop': DevelopWithBuildStatic,
'sdist': SdistWithBuildStatic,
'install': SmartInstall,
},
license='BSD',
include_package_data=True,
entry_points={
'console_scripts': [
'sentry = sentry.utils.runner:main',
],
},
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
|
|
import sys
import numpy as np
import csv
from datetime import datetime
from datetime import timedelta
def subtract_dates(date1, date2):
"""
Takes two dates %Y-%m-%d format. Returns date1 - date2, measured in days.
"""
date_format = "%Y-%m-%d"
a = datetime.strptime(date1, date_format)
b = datetime.strptime(date2, date_format)
delta = a - b
#print(date1,"-",date2,"=",delta.days)
return delta.days
def steps_to_date(steps, start_date):
date_format = "%Y-%m-%d"
date_1 = datetime.strptime(start_date, "%Y-%m-%d")
new_date = (date_1 + timedelta(days=steps)).date()
return new_date
def _processEntry(row, table, data_type, date_column, count_column, start_date):
"""
Code to process a population count from a CSV file.
column <date_column> contains the corresponding date in %Y-%m-%d format.
column <count_column> contains the population size on that date.
"""
if len(row) < 2:
return table
if row[0][0] == "#":
return table
if row[1]=="":
return table
# Make sure the date column becomes an integer, which contains the offset in days relative to the start date.
row[date_column] = subtract_dates(row[date_column], start_date)
if data_type == "int":
table = np.vstack([table,[int(row[date_column]), int(row[count_column])]])
else:
table = np.vstack([table,[float(row[date_column]), float(row[count_column])]])
return table
def AddCSVTables(table1, table2):
"""
Add two time series tables. This version does not yet support interpolation between values.
(The UNHCR data website also does not do this, by the way)
"""
table = np.zeros([0,2])
offset = 0
    last_c2 = np.zeros(2)  # flat [day, count] pair; a (1, 2) array would make last_c2[1] an index error
for c2 in table2:
# If table 2 date value is higher, then keep adding entries from table 1
while c2[0] > table1[offset][0]:
table = np.vstack([table,[table1[offset][0], last_c2[1]+table1[offset][1]]])
if(offset < len(table1)-1):
offset += 1
else:
break
# If the two match, add a total.
if c2[0] == table1[offset][0]:
table = np.vstack([table,[c2[0], c2[1]+table1[offset][1]]])
if(offset < len(table1)-1):
offset += 1
last_c2 = c2
continue
# If table 1 value is higher, add an aggregate entry, and go to the next iteration without increasing the offset.
if c2[0] < table1[offset][0]:
table = np.vstack([table,[c2[0], c2[1]+table1[offset][1]]])
last_c2 = c2
continue
return table
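def _example_add_csv_tables():
    # Illustrative sketch only (the names are made up): combine two small
    # [day offset, count] tables the same way the DataTable loader does when a
    # layout row lists several CSV files for one location.
    camp_a = np.array([[0, 100], [10, 150]])
    camp_b = np.array([[0, 50], [10, 75]])
    # Expected result: [[0, 150], [10, 225]]
    return AddCSVTables(camp_a, camp_b)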
def ConvertCsvFileToNumPyTable(csv_name, data_type="int", date_column=0, count_column=1, start_date="2012-02-29"):
"""
    Converts a CSV file to a table of (day offset, count) pairs, with offsets measured in days from start_date (default 2012-02-29).
CSV format for each line is:
yyyy-mm-dd,number
Default settings:
- subtract_dates is used on column 0.
- Use # sign to comment out lines. (first line is NOT ignored by default)
"""
table = np.zeros([0,2])
with open(csv_name, newline='') as csvfile:
values = csv.reader(csvfile)
row = next(values)
if(len(row)>1):
if len(row[0])>0 and "DateTime" not in row[0]:
table = _processEntry(row, table, data_type, date_column, count_column, start_date)
for row in values:
table = _processEntry(row, table, data_type, date_column, count_column, start_date)
return table
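def _example_convert_csv():
    # Illustrative sketch only; the path is hypothetical. A file containing
    #   2012-02-29,0
    #   2012-03-31,5000
    # becomes the table [[0, 0], [31, 5000]] with the default start_date, since the
    # date column is converted to an offset in days from 2012-02-29.
    return ConvertCsvFileToNumPyTable("mali2012/example_camp.csv", start_date="2012-02-29")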
class DataTable:
def __init__(self, data_directory="mali2012", data_layout="data_layout_refugee.csv", start_date="2012-02-29", csvformat="generic"):
"""
read in CSV data files containing refugee data.
"""
self.csvformat = csvformat
self.total_refugee_column = 1
self.days_column = 0
self.header = []
self.data_table = []
self.start_date = start_date
self.override_refugee_input = False # Use modified input data for FLEE simulations
self.override_refugee_input_file = ""
self.data_directory = data_directory
if self.csvformat=="generic":
with open("%s/%s" % (data_directory, data_layout), newline='') as csvfile:
values = csv.reader(csvfile)
for row in values:
if(len(row)>1):
if(row[0][0] == "#"):
continue
self.header.append(row[0])
#print("%s/%s" % (data_directory, row[1]))
csv_total = ConvertCsvFileToNumPyTable("%s/%s" % (data_directory, row[1]), start_date=start_date)
for added_csv in row[2:]:
csv_total = AddCSVTables(csv_total, ConvertCsvFileToNumPyTable("%s/%s" % (data_directory, added_csv), start_date=start_date))
self.data_table.append(csv_total)
#print(self.header, self.data_table)
def override_input(self, data_file_name):
"""
Do not use the total refugee count data as the input value, but instead take values from a separate file.
"""
self.override_refugee_input_file = data_file_name
self.override_refugee_input = True
self.header.append("total (modified input)")
self.data_table.append(ConvertCsvFileToNumPyTable("%s" % (data_file_name), start_date=self.start_date))
def get_daily_difference(self, day, day_column=0, count_column=1, Debug=False, FullInterpolation=True):
"""
Extrapolate count of new refugees at a given time point, based on input data.
count_column = column which contains the relevant difference.
FullInterpolation: when disabled, the function ignores any decreases in refugee count.
when enabled, the function can return negative numbers when the new total is higher than the older one.
"""
self.total_refugee_column = count_column
self.days_column = day_column
ref_table = self.data_table[0]
        if self.override_refugee_input:
ref_table = self.data_table[self._find_headerindex("total (modified input)")]
# Refugees only come in *after* day 0.
if int(day) == 0:
ref_table = self.data_table[0]
new_refugees = 0
for i in self.header[1:]:
new_refugees += self.get_field(i, 0, FullInterpolation)
#print("Day 0 data:",i,self.get_field(i, 0, FullInterpolation))
return int(new_refugees)
else:
new_refugees = 0
for i in self.header[1:]:
new_refugees += self.get_field(i, day, FullInterpolation) - self.get_field(i, day-1, FullInterpolation)
#print self.get_field("Mbera", day), self.get_field("Mbera", day-1)
return int(new_refugees)
# If the day exceeds the validation data table, then we return 0
return 0
def get_interpolated_data(self, column, day):
"""
        Gets the value in a given column for a given day. Interpolates between days as needed.
"""
ref_table = self.data_table[column]
old_val = ref_table[0,self.total_refugee_column]
#print(ref_table[0][self.days_column])
old_day = ref_table[0,self.days_column]
if day <= old_day:
return old_val
for i in range(1, len(ref_table)):
#print(day, ref_table[i][self.days_column])
if day < ref_table[i,self.days_column]:
old_val = ref_table[i-1,self.total_refugee_column]
old_day = ref_table[i-1,self.days_column]
fraction = float(day - old_day) / float(ref_table[i,self.days_column] - old_day)
if fraction > 1.0:
print("Error with days_column: ", ref_table[i,self.days_column])
return -1
#print(day, old_day, ref_table[i][self.total_refugee_column], old_val)
return int(old_val + fraction * float(ref_table[i,self.total_refugee_column] - old_val))
#print("# warning: ref_table length exceeded for column: ",day, self.header[column], ", last ref_table values: ", ref_table[i-1][self.total_refugee_column], ref_table[i][self.days_column])
return int(ref_table[-1,self.total_refugee_column])
def get_raw_data(self, column, day):
"""
        Gets the value in a given column for a given day. Does not interpolate.
"""
ref_table = self.data_table[column]
old_val = ref_table[0][self.total_refugee_column]
old_day = 0
for i in range (0,len(ref_table)):
if day >= ref_table[i][self.days_column]:
old_val = ref_table[i][self.total_refugee_column]
old_day = ref_table[i][self.days_column]
else:
break
return int(old_val)
def _find_headerindex(self, name):
"""
Finds matching index number for a particular name in the list of headers.
"""
for i in range(0,len(self.header)):
if self.header[i] == name:
return i
print(self.header)
sys.exit("Error: can't find the header %s in the header list" % (name))
def get_field(self, name, day, FullInterpolation=True):
"""
        Gets the value in a given named column for a given day. Interpolates between days if needed.
"""
i = self._find_headerindex(name)
if FullInterpolation:
#print(name, i, day, self.get_interpolated_data(i, day))
return self.get_interpolated_data(i, day)
else:
return self.get_raw_data(i, day)
def print_data_values_for_location(self, name, last_day):
"""
print all data values for selected location.
"""
for i in range(0,last_day):
print(i, self.get_field(name,i))
def is_interpolated(self, name, day):
"""
Checks if data for a given day is inter/extrapolated or not.
"""
for i in range(0,len(self.header)):
if self.header[i] == name:
ref_table = self.data_table[i]
for j in range(0, len(ref_table)):
if int(day) == int(ref_table[j][self.days_column]):
return False
if int(day) < int(ref_table[j][self.days_column]):
return True
return True
#def d.correctLevel1Registrations(name, date):
# correct for start date.
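def _example_data_table_usage():
    # Illustrative sketch only (assumes the default mali2012 layout files are
    # present next to this script). Shows the two main queries: an interpolated
    # per-location count and the day-over-day change in the overall refugee count.
    d = DataTable(data_directory="mali2012", data_layout="data_layout_refugee.csv")
    location = d.header[1]  # get_daily_difference also skips header[0] and sums the rest
    return d.get_field(location, 10), d.get_daily_difference(10)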
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""PrivateEndpointConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.servicebus.v2021_06_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
namespace_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PrivateEndpointConnectionListResult"]:
"""Gets the available PrivateEndpointConnections within a namespace.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.servicebus.v2021_06_01_preview.models.PrivateEndpointConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateEndpointConnections'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
namespace_name: str,
private_endpoint_connection_name: str,
parameters: "_models.PrivateEndpointConnection",
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Creates or updates PrivateEndpointConnections of service namespace.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param private_endpoint_connection_name: The PrivateEndpointConnection name.
:type private_endpoint_connection_name: str
:param parameters: Parameters supplied to update Status of PrivateEndPoint Connection to
namespace resource.
:type parameters: ~azure.mgmt.servicebus.v2021_06_01_preview.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_06_01_preview.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
namespace_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
namespace_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Private Endpoint Connection.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param private_endpoint_connection_name: The PrivateEndpointConnection name.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
namespace_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Gets a description for the specified Private Endpoint Connection.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param private_endpoint_connection_name: The PrivateEndpointConnection name.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_06_01_preview.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
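# Illustrative usage sketch only (not generated code). It assumes an already
# constructed management client that exposes this operation group as
# `private_endpoint_connections`; the resource names below are placeholders.
async def _example_private_endpoint_connections(client, resource_group, namespace):
    # The paged `list` call is consumed with `async for`; each item is a
    # deserialized PrivateEndpointConnection.
    async for connection in client.private_endpoint_connections.list(resource_group, namespace):
        print(connection.name)
    # `begin_delete` returns an AsyncLROPoller; await its completion explicitly.
    poller = await client.private_endpoint_connections.begin_delete(
        resource_group, namespace, "example-connection")
    await poller.wait()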
|
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import pandas as pd
import ibis
from ibis import literal as L
from ibis.compat import unittest, StringIO, Decimal
from ibis.expr.datatypes import Category
from ibis.expr.tests.mocks import MockConnection
from ibis.impala.compiler import ImpalaExprTranslator, to_sql, ImpalaContext
from ibis.sql.tests.test_compiler import ExprTestCases
from ibis.impala.tests.common import ImpalaE2E
import ibis.expr.types as ir
import ibis.expr.api as api
def approx_equal(a, b, eps):
assert abs(a - b) < eps
class ExprSQLTest(object):
def _check_expr_cases(self, cases, context=None, named=False):
for expr, expected in cases:
repr(expr)
result = self._translate(expr, named=named, context=context)
assert result == expected
def _translate(self, expr, named=False, context=None):
translator = ImpalaExprTranslator(expr, context=context, named=named)
return translator.get_result()
class TestValueExprs(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
self.int_cols = ['a', 'b', 'c', 'd']
self.bool_cols = ['h']
self.float_cols = ['e', 'f']
def _check_literals(self, cases):
for value, expected in cases:
lit_expr = L(value)
result = self._translate(lit_expr)
assert result == expected
def test_string_literals(self):
cases = [
('simple', "'simple'"),
('I can\'t', "'I can\\'t'"),
('An "escape"', "'An \"escape\"'")
]
for value, expected in cases:
lit_expr = L(value)
result = self._translate(lit_expr)
assert result == expected
def test_decimal_builtins(self):
t = self.con.table('tpch_lineitem')
col = t.l_extendedprice
cases = [
(col.precision(), 'precision(`l_extendedprice`)'),
(col.scale(), 'scale(`l_extendedprice`)'),
]
self._check_expr_cases(cases)
def test_number_boolean_literals(self):
cases = [
(5, '5'),
(1.5, '1.5'),
(True, 'TRUE'),
(False, 'FALSE')
]
self._check_literals(cases)
def test_column_ref_table_aliases(self):
context = ImpalaContext()
table1 = ibis.table([
('key1', 'string'),
('value1', 'double')
])
table2 = ibis.table([
('key2', 'string'),
('value and2', 'double')
])
context.set_ref(table1, 't0')
context.set_ref(table2, 't1')
expr = table1['value1'] - table2['value and2']
result = self._translate(expr, context=context)
expected = 't0.`value1` - t1.`value and2`'
assert result == expected
def test_column_ref_quoting(self):
schema = [('has a space', 'double')]
table = ibis.table(schema)
        # Assert the translation; the expected SQL was previously passed as the
        # `named` flag and never checked.
        result = self._translate(table['has a space'])
        assert result == '`has a space`'
def test_identifier_quoting(self):
schema = [('date', 'double'), ('table', 'string')]
table = ibis.table(schema)
        # Assert the translations; the expected SQL strings were previously passed
        # as the `named` flag and never checked.
        result = self._translate(table['date'])
        assert result == '`date`'
        result = self._translate(table['table'])
        assert result == '`table`'
def test_named_expressions(self):
a, b, g = self.table.get_columns(['a', 'b', 'g'])
cases = [
(g.cast('double').name('g_dub'), 'CAST(`g` AS double) AS `g_dub`'),
(g.name('has a space'), '`g` AS `has a space`'),
(((a - b) * a).name('expr'), '(`a` - `b`) * `a` AS `expr`')
]
return self._check_expr_cases(cases, named=True)
def test_binary_infix_operators(self):
# For each function, verify that the generated code is what we expect
a, b, h = self.table.get_columns(['a', 'b', 'h'])
bool_col = a > 0
cases = [
(a + b, '`a` + `b`'),
(a - b, '`a` - `b`'),
(a * b, '`a` * `b`'),
(a / b, '`a` / `b`'),
(a ** b, 'pow(`a`, `b`)'),
(a < b, '`a` < `b`'),
(a <= b, '`a` <= `b`'),
(a > b, '`a` > `b`'),
(a >= b, '`a` >= `b`'),
(a == b, '`a` = `b`'),
(a != b, '`a` != `b`'),
(h & bool_col, '`h` AND (`a` > 0)'),
(h | bool_col, '`h` OR (`a` > 0)'),
# xor is brute force
(h ^ bool_col, '(`h` OR (`a` > 0)) AND NOT (`h` AND (`a` > 0))')
]
self._check_expr_cases(cases)
def test_binary_infix_parenthesization(self):
a, b, c = self.table.get_columns(['a', 'b', 'c'])
cases = [
((a + b) + c, '(`a` + `b`) + `c`'),
(a.log() + c, 'ln(`a`) + `c`'),
(b + (-(a + c)), '`b` + (-(`a` + `c`))')
]
self._check_expr_cases(cases)
def test_between(self):
cases = [
(self.table.f.between(0, 1), '`f` BETWEEN 0 AND 1')
]
self._check_expr_cases(cases)
def test_isnull_notnull(self):
cases = [
(self.table['g'].isnull(), '`g` IS NULL'),
(self.table['a'].notnull(), '`a` IS NOT NULL'),
((self.table['a'] + self.table['b']).isnull(),
'`a` + `b` IS NULL')
]
self._check_expr_cases(cases)
def test_casts(self):
a, d, g = self.table.get_columns(['a', 'd', 'g'])
cases = [
(a.cast('int16'), 'CAST(`a` AS smallint)'),
(a.cast('int32'), 'CAST(`a` AS int)'),
(a.cast('int64'), 'CAST(`a` AS bigint)'),
(a.cast('float'), 'CAST(`a` AS float)'),
(a.cast('double'), 'CAST(`a` AS double)'),
(a.cast('string'), 'CAST(`a` AS string)'),
(d.cast('int8'), 'CAST(`d` AS tinyint)'),
(g.cast('double'), 'CAST(`g` AS double)'),
(g.cast('timestamp'), 'CAST(`g` AS timestamp)')
]
self._check_expr_cases(cases)
def test_misc_conditionals(self):
a = self.table.a
cases = [
(a.nullif(0), 'nullif(`a`, 0)')
]
self._check_expr_cases(cases)
def test_decimal_casts(self):
cases = [
(L('9.9999999').cast('decimal(38,5)'),
"CAST('9.9999999' AS decimal(38,5))"),
(self.table.f.cast('decimal(12,2)'), "CAST(`f` AS decimal(12,2))")
]
self._check_expr_cases(cases)
def test_negate(self):
cases = [
(-self.table['a'], '-`a`'),
(-self.table['f'], '-`f`'),
(-self.table['h'], 'NOT `h`')
]
self._check_expr_cases(cases)
def test_timestamp_extract_field(self):
fields = ['year', 'month', 'day', 'hour', 'minute',
'second', 'millisecond']
cases = [(getattr(self.table.i, field)(),
"extract(`i`, '{0}')".format(field))
for field in fields]
self._check_expr_cases(cases)
# integration with SQL translation
expr = self.table[self.table.i.year().name('year'),
self.table.i.month().name('month'),
self.table.i.day().name('day')]
result = to_sql(expr)
expected = \
"""SELECT extract(`i`, 'year') AS `year`, extract(`i`, 'month') AS `month`,
extract(`i`, 'day') AS `day`
FROM alltypes"""
assert result == expected
def test_timestamp_now(self):
cases = [
(ibis.now(), 'now()')
]
self._check_expr_cases(cases)
def test_timestamp_deltas(self):
units = ['year', 'month', 'week', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']
t = self.table.i
f = '`i`'
cases = []
for unit in units:
K = 5
offset = getattr(ibis, unit)(K)
template = '{0}s_add({1}, {2})'
cases.append((t + offset, template.format(unit, f, K)))
cases.append((t - offset, template.format(unit, f, -K)))
self._check_expr_cases(cases)
def test_timestamp_literals(self):
from pandas import Timestamp
tv1 = '2015-01-01 12:34:56'
ex1 = ("'2015-01-01 12:34:56'")
cases = [
(L(Timestamp(tv1)), ex1),
(L(Timestamp(tv1).to_pydatetime()), ex1),
(ibis.timestamp(tv1), ex1)
]
self._check_expr_cases(cases)
def test_timestamp_from_integer(self):
col = self.table.c
cases = [
(col.to_timestamp(),
'CAST(from_unixtime(`c`, "yyyy-MM-dd HH:mm:ss") '
'AS timestamp)'),
(col.to_timestamp('ms'),
'CAST(from_unixtime(CAST(`c` / 1000 AS int), '
'"yyyy-MM-dd HH:mm:ss") '
'AS timestamp)'),
(col.to_timestamp('us'),
'CAST(from_unixtime(CAST(`c` / 1000000 AS int), '
'"yyyy-MM-dd HH:mm:ss") '
'AS timestamp)'),
]
self._check_expr_cases(cases)
def test_correlated_predicate_subquery(self):
t0 = self.table
t1 = t0.view()
expr = t0.g == t1.g
ctx = ImpalaContext()
ctx.make_alias(t0)
# Grab alias from parent context
subctx = ctx.subcontext()
subctx.make_alias(t1)
subctx.make_alias(t0)
result = self._translate(expr, context=subctx)
expected = "t0.`g` = t1.`g`"
assert result == expected
def test_any_all(self):
t = self.table
bool_expr = t.f == 0
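        # any()/all() compile to comparisons against sum() of the boolean predicate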
cases = [
(bool_expr.any(), 'sum(`f` = 0) > 0'),
(-bool_expr.any(), 'sum(`f` = 0) = 0'),
(bool_expr.all(), 'sum(`f` = 0) = count(*)'),
(-bool_expr.all(), 'sum(`f` = 0) < count(*)'),
]
self._check_expr_cases(cases)
class TestUnaryBuiltins(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_numeric_unary_builtins(self):
        # Zero-argument functions; tuples give (ibis name, Impala name) where they differ
functions = ['abs', 'ceil', 'floor', 'exp', 'sqrt', 'sign',
('log', 'ln'),
('approx_median', 'appx_median'),
('approx_nunique', 'ndv'),
'ln', 'log2', 'log10', 'nullifzero', 'zeroifnull']
cases = []
for what in functions:
if isinstance(what, tuple):
ibis_name, sql_name = what
else:
ibis_name = sql_name = what
for cname in ['double_col', 'int_col']:
expr = getattr(self.table[cname], ibis_name)()
cases.append((expr, '{0}({1})'.format(
sql_name, '`{0}`'.format(cname))))
self._check_expr_cases(cases)
def test_log_other_bases(self):
cases = [
(self.table.double_col.log(5), 'log(`double_col`, 5)')
]
self._check_expr_cases(cases)
def test_round(self):
cases = [
(self.table.double_col.round(), 'round(`double_col`)'),
(self.table.double_col.round(0), 'round(`double_col`, 0)'),
(self.table.double_col.round(2, ), 'round(`double_col`, 2)'),
(self.table.double_col.round(self.table.tinyint_col),
'round(`double_col`, `tinyint_col`)')
]
self._check_expr_cases(cases)
def test_hash(self):
expr = self.table.int_col.hash()
assert isinstance(expr, ir.Int64Array)
assert isinstance(self.table.int_col.sum().hash(),
ir.Int64Scalar)
cases = [
(self.table.int_col.hash(), 'fnv_hash(`int_col`)')
]
self._check_expr_cases(cases)
def test_reduction_where(self):
cond = self.table.bigint_col < 70
c = self.table.double_col
tmp = ('{0}(CASE WHEN `bigint_col` < 70 THEN `double_col` '
'ELSE NULL END)')
cases = [
(c.sum(where=cond), tmp.format('sum')),
(c.count(where=cond), tmp.format('count')),
(c.mean(where=cond), tmp.format('avg')),
(c.max(where=cond), tmp.format('max')),
(c.min(where=cond), tmp.format('min')),
(c.std(where=cond), tmp.format('stddev')),
(c.std(where=cond, how='pop'), tmp.format('stddev_pop')),
(c.var(where=cond), tmp.format('variance')),
(c.var(where=cond, how='pop'), tmp.format('variance_pop')),
]
self._check_expr_cases(cases)
def test_reduction_invalid_where(self):
condbad_literal = L('T')
c = self.table.double_col
for reduction in [c.sum, c.count, c.mean, c.max, c.min]:
with self.assertRaises(TypeError):
reduction(where=condbad_literal)
class TestCaseExprs(unittest.TestCase, ExprSQLTest, ExprTestCases):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_isnull_1_0(self):
expr = self.table.g.isnull().ifelse(1, 0)
result = self._translate(expr)
expected = 'CASE WHEN `g` IS NULL THEN 1 ELSE 0 END'
assert result == expected
# inside some other function
result = self._translate(expr.sum())
expected = 'sum(CASE WHEN `g` IS NULL THEN 1 ELSE 0 END)'
assert result == expected
def test_simple_case(self):
expr = self._case_simple_case()
result = self._translate(expr)
expected = """CASE `g`
WHEN 'foo' THEN 'bar'
WHEN 'baz' THEN 'qux'
ELSE 'default'
END"""
assert result == expected
def test_search_case(self):
expr = self._case_search_case()
result = self._translate(expr)
expected = """CASE
WHEN `f` > 0 THEN `d` * 2
WHEN `c` < 0 THEN `a` * 2
ELSE NULL
END"""
assert result == expected
def test_where_use_if(self):
expr = ibis.where(self.table.f > 0, self.table.e, self.table.a)
assert isinstance(expr, ir.FloatValue)
result = self._translate(expr)
expected = "if(`f` > 0, `e`, `a`)"
assert result == expected
def test_nullif_ifnull(self):
table = self.con.table('tpch_lineitem')
f = table.l_quantity
cases = [
(f.nullif(f == 0),
'nullif(`l_quantity`, `l_quantity` = 0)'),
(f.fillna(0),
'isnull(`l_quantity`, CAST(0 AS decimal(12,2)))'),
]
self._check_expr_cases(cases)
def test_decimal_fillna_cast_arg(self):
table = self.con.table('tpch_lineitem')
f = table.l_extendedprice
cases = [
(f.fillna(0),
'isnull(`l_extendedprice`, CAST(0 AS decimal(12,2)))'),
(f.fillna(0.0), 'isnull(`l_extendedprice`, 0.0)'),
]
self._check_expr_cases(cases)
class TestBucketHistogram(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_bucket_to_case(self):
buckets = [0, 10, 25, 50]
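        # default bucketing: bins are left-closed, and the extreme right edge
        # is included (close_extreme defaults to True)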
expr1 = self.table.f.bucket(buckets)
expected1 = """\
CASE
WHEN (`f` >= 0) AND (`f` < 10) THEN 0
WHEN (`f` >= 10) AND (`f` < 25) THEN 1
WHEN (`f` >= 25) AND (`f` <= 50) THEN 2
ELSE NULL
END"""
expr2 = self.table.f.bucket(buckets, close_extreme=False)
expected2 = """\
CASE
WHEN (`f` >= 0) AND (`f` < 10) THEN 0
WHEN (`f` >= 10) AND (`f` < 25) THEN 1
WHEN (`f` >= 25) AND (`f` < 50) THEN 2
ELSE NULL
END"""
expr3 = self.table.f.bucket(buckets, closed='right')
expected3 = """\
CASE
WHEN (`f` >= 0) AND (`f` <= 10) THEN 0
WHEN (`f` > 10) AND (`f` <= 25) THEN 1
WHEN (`f` > 25) AND (`f` <= 50) THEN 2
ELSE NULL
END"""
expr4 = self.table.f.bucket(buckets, closed='right',
close_extreme=False)
expected4 = """\
CASE
WHEN (`f` > 0) AND (`f` <= 10) THEN 0
WHEN (`f` > 10) AND (`f` <= 25) THEN 1
WHEN (`f` > 25) AND (`f` <= 50) THEN 2
ELSE NULL
END"""
expr5 = self.table.f.bucket(buckets, include_under=True)
expected5 = """\
CASE
WHEN `f` < 0 THEN 0
WHEN (`f` >= 0) AND (`f` < 10) THEN 1
WHEN (`f` >= 10) AND (`f` < 25) THEN 2
WHEN (`f` >= 25) AND (`f` <= 50) THEN 3
ELSE NULL
END"""
expr6 = self.table.f.bucket(buckets,
include_under=True,
include_over=True)
expected6 = """\
CASE
WHEN `f` < 0 THEN 0
WHEN (`f` >= 0) AND (`f` < 10) THEN 1
WHEN (`f` >= 10) AND (`f` < 25) THEN 2
WHEN (`f` >= 25) AND (`f` <= 50) THEN 3
WHEN `f` > 50 THEN 4
ELSE NULL
END"""
expr7 = self.table.f.bucket(buckets,
close_extreme=False,
include_under=True,
include_over=True)
expected7 = """\
CASE
WHEN `f` < 0 THEN 0
WHEN (`f` >= 0) AND (`f` < 10) THEN 1
WHEN (`f` >= 10) AND (`f` < 25) THEN 2
WHEN (`f` >= 25) AND (`f` < 50) THEN 3
WHEN `f` >= 50 THEN 4
ELSE NULL
END"""
expr8 = self.table.f.bucket(buckets, closed='right',
close_extreme=False,
include_under=True)
expected8 = """\
CASE
WHEN `f` <= 0 THEN 0
WHEN (`f` > 0) AND (`f` <= 10) THEN 1
WHEN (`f` > 10) AND (`f` <= 25) THEN 2
WHEN (`f` > 25) AND (`f` <= 50) THEN 3
ELSE NULL
END"""
expr9 = self.table.f.bucket([10], closed='right',
include_over=True,
include_under=True)
expected9 = """\
CASE
WHEN `f` <= 10 THEN 0
WHEN `f` > 10 THEN 1
ELSE NULL
END"""
expr10 = self.table.f.bucket([10], include_over=True,
include_under=True)
expected10 = """\
CASE
WHEN `f` < 10 THEN 0
WHEN `f` >= 10 THEN 1
ELSE NULL
END"""
cases = [
(expr1, expected1),
(expr2, expected2),
(expr3, expected3),
(expr4, expected4),
(expr5, expected5),
(expr6, expected6),
(expr7, expected7),
(expr8, expected8),
(expr9, expected9),
(expr10, expected10),
]
self._check_expr_cases(cases)
def test_cast_category_to_int_noop(self):
# Because the bucket result is an integer, no explicit cast is
# necessary
expr = (self.table.f.bucket([10], include_over=True,
include_under=True)
.cast('int32'))
expected = """\
CASE
WHEN `f` < 10 THEN 0
WHEN `f` >= 10 THEN 1
ELSE NULL
END"""
expr2 = (self.table.f.bucket([10], include_over=True,
include_under=True)
.cast('double'))
expected2 = """\
CAST(CASE
WHEN `f` < 10 THEN 0
WHEN `f` >= 10 THEN 1
ELSE NULL
END AS double)"""
self._check_expr_cases([(expr, expected),
(expr2, expected2)])
def test_bucket_assign_labels(self):
buckets = [0, 10, 25, 50]
bucket = self.table.f.bucket(buckets, include_under=True)
size = self.table.group_by(bucket.name('tier')).size()
labelled = size.tier.label(['Under 0', '0 to 10',
'10 to 25', '25 to 50'],
nulls='error').name('tier2')
expr = size[labelled, size['count']]
expected = """\
SELECT
CASE `tier`
WHEN 0 THEN 'Under 0'
WHEN 1 THEN '0 to 10'
WHEN 2 THEN '10 to 25'
WHEN 3 THEN '25 to 50'
ELSE 'error'
END AS `tier2`, `count`
FROM (
SELECT
CASE
WHEN `f` < 0 THEN 0
WHEN (`f` >= 0) AND (`f` < 10) THEN 1
WHEN (`f` >= 10) AND (`f` < 25) THEN 2
WHEN (`f` >= 25) AND (`f` <= 50) THEN 3
ELSE NULL
END AS `tier`, count(*) AS `count`
FROM alltypes
GROUP BY 1
) t0"""
result = to_sql(expr)
assert result == expected
self.assertRaises(ValueError, size.tier.label, ['a', 'b', 'c'])
self.assertRaises(ValueError, size.tier.label,
['a', 'b', 'c', 'd', 'e'])
class TestInNotIn(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_field_in_literals(self):
cases = [
(self.table.g.isin(["foo", "bar", "baz"]),
"`g` IN ('foo', 'bar', 'baz')"),
(self.table.g.notin(["foo", "bar", "baz"]),
"`g` NOT IN ('foo', 'bar', 'baz')")
]
self._check_expr_cases(cases)
def test_literal_in_list(self):
cases = [
(L(2).isin([self.table.a, self.table.b, self.table.c]),
'2 IN (`a`, `b`, `c`)'),
(L(2).notin([self.table.a, self.table.b, self.table.c]),
'2 NOT IN (`a`, `b`, `c`)')
]
self._check_expr_cases(cases)
def test_isin_notin_in_select(self):
filtered = self.table[self.table.g.isin(["foo", "bar"])]
result = to_sql(filtered)
expected = """SELECT *
FROM alltypes
WHERE `g` IN ('foo', 'bar')"""
assert result == expected
filtered = self.table[self.table.g.notin(["foo", "bar"])]
result = to_sql(filtered)
expected = """SELECT *
FROM alltypes
WHERE `g` NOT IN ('foo', 'bar')"""
assert result == expected
class TestCoalesceGreaterLeast(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_coalesce(self):
t = self.table
cases = [
(ibis.coalesce(t.string_col, 'foo'),
"coalesce(`string_col`, 'foo')"),
(ibis.coalesce(t.int_col, t.bigint_col),
'coalesce(`int_col`, `bigint_col`)'),
]
self._check_expr_cases(cases)
def test_greatest(self):
t = self.table
cases = [
(ibis.greatest(t.string_col, 'foo'),
"greatest(`string_col`, 'foo')"),
(ibis.greatest(t.int_col, t.bigint_col),
'greatest(`int_col`, `bigint_col`)'),
]
self._check_expr_cases(cases)
def test_least(self):
t = self.table
cases = [
(ibis.least(t.string_col, 'foo'),
"least(`string_col`, 'foo')"),
(ibis.least(t.int_col, t.bigint_col),
'least(`int_col`, `bigint_col`)'),
]
self._check_expr_cases(cases)
class TestAnalyticFunctions(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_analytic_exprs(self):
t = self.table
w = ibis.window(order_by=t.float_col)
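        # ibis.row_number() is 0-based, so the 1-based SQL row_number() gets an explicit - 1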
cases = [
(ibis.row_number().over(w),
'row_number() OVER (ORDER BY `float_col`) - 1'),
(t.string_col.lag(), 'lag(`string_col`)'),
(t.string_col.lag(2), 'lag(`string_col`, 2)'),
(t.string_col.lag(default=0), 'lag(`string_col`, 1, 0)'),
(t.string_col.lead(), 'lead(`string_col`)'),
(t.string_col.lead(2), 'lead(`string_col`, 2)'),
(t.string_col.lead(default=0), 'lead(`string_col`, 1, 0)'),
(t.double_col.first(), 'first_value(`double_col`)'),
(t.double_col.last(), 'last_value(`double_col`)'),
# (t.double_col.nth(4), 'first_value(lag(double_col, 4 - 1))')
]
self._check_expr_cases(cases)
class TestStringBuiltins(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_unary_ops(self):
s = self.table.string_col
cases = [
(s.lower(), 'lower(`string_col`)'),
(s.upper(), 'upper(`string_col`)'),
(s.reverse(), 'reverse(`string_col`)'),
(s.strip(), 'trim(`string_col`)'),
(s.lstrip(), 'ltrim(`string_col`)'),
(s.rstrip(), 'rtrim(`string_col`)'),
(s.capitalize(), 'initcap(`string_col`)'),
(s.length(), 'length(`string_col`)'),
(s.ascii_str(), 'ascii(`string_col`)')
]
self._check_expr_cases(cases)
def test_substr(self):
        # Impala's substr() is 1-based, so ibis adds 1 to the 0-based offset
cases = [
(self.table.string_col.substr(2), 'substr(`string_col`, 2 + 1)'),
(self.table.string_col.substr(0, 3),
'substr(`string_col`, 0 + 1, 3)')
]
self._check_expr_cases(cases)
def test_strright(self):
cases = [
(self.table.string_col.right(4), 'strright(`string_col`, 4)')
]
self._check_expr_cases(cases)
def test_like(self):
cases = [
(self.table.string_col.like('foo%'), "`string_col` LIKE 'foo%'")
]
self._check_expr_cases(cases)
def test_rlike(self):
        ex = r"`string_col` RLIKE '[\d]+'"
        cases = [
            (self.table.string_col.rlike(r'[\d]+'), ex),
            (self.table.string_col.re_search(r'[\d]+'), ex),
]
self._check_expr_cases(cases)
def test_re_extract(self):
        sql = r"regexp_extract(`string_col`, '[\d]+', 0)"
        cases = [
            (self.table.string_col.re_extract(r'[\d]+', 0), sql)
]
self._check_expr_cases(cases)
def test_re_replace(self):
        sql = r"regexp_replace(`string_col`, '[\d]+', 'aaa')"
        cases = [
            (self.table.string_col.re_replace(r'[\d]+', 'aaa'), sql)
]
self._check_expr_cases(cases)
def test_parse_url(self):
sql = "parse_url(`string_col`, 'HOST')"
cases = [
(self.table.string_col.parse_url('HOST'), sql)
]
self._check_expr_cases(cases)
def test_repeat(self):
cases = [
(self.table.string_col.repeat(2), 'repeat(`string_col`, 2)')
]
self._check_expr_cases(cases)
def test_translate(self):
cases = [
(self.table.string_col.translate('a', 'b'),
"translate(`string_col`, 'a', 'b')")
]
self._check_expr_cases(cases)
def test_find(self):
s = self.table.string_col
i1 = self.table.tinyint_col
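        # Impala's locate() is 1-based: start positions get + 1 and results get - 1 to stay 0-based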
cases = [
(s.find('a'), "locate('a', `string_col`) - 1"),
(s.find('a', 2), "locate('a', `string_col`, 3) - 1"),
(s.find('a', start=i1),
"locate('a', `string_col`, `tinyint_col` + 1) - 1")
]
self._check_expr_cases(cases)
def test_lpad(self):
cases = [
(self.table.string_col.lpad(1, 'a'), "lpad(`string_col`, 1, 'a')"),
(self.table.string_col.lpad(25), "lpad(`string_col`, 25, ' ')")
]
self._check_expr_cases(cases)
def test_rpad(self):
cases = [
(self.table.string_col.rpad(1, 'a'), "rpad(`string_col`, 1, 'a')"),
(self.table.string_col.rpad(25), "rpad(`string_col`, 25, ' ')")
]
self._check_expr_cases(cases)
def test_find_in_set(self):
cases = [
(self.table.string_col.find_in_set(['a']),
"find_in_set(`string_col`, 'a') - 1"),
(self.table.string_col.find_in_set(['a', 'b']),
"find_in_set(`string_col`, 'a,b') - 1")
]
self._check_expr_cases(cases)
def test_string_join(self):
cases = [
(L(',').join(['a', 'b']), "concat_ws(',', 'a', 'b')")
]
self._check_expr_cases(cases)
class TestImpalaExprs(ImpalaE2E, unittest.TestCase, ExprTestCases):
def test_embedded_identifier_quoting(self):
t = self.con.table('functional_alltypes')
expr = (t[[(t.double_col * 2).name('double(fun)')]]
['double(fun)'].sum())
expr.execute()
def test_table_info(self):
t = self.con.table('functional_alltypes')
buf = StringIO()
t.info(buf=buf)
assert buf.getvalue() is not None
def test_execute_exprs_no_table_ref(self):
cases = [
(L(1) + L(2), 3)
]
for expr, expected in cases:
result = self.con.execute(expr)
assert result == expected
# ExprList
exlist = ibis.api.expr_list([L(1).name('a'),
ibis.now().name('b'),
L(2).log().name('c')])
self.con.execute(exlist)
def test_summary_execute(self):
table = self.alltypes
# also test set_column while we're at it
table = table.set_column('double_col',
table.double_col * 2)
expr = table.double_col.summary()
repr(expr)
result = expr.execute()
assert isinstance(result, pd.DataFrame)
expr = (table.group_by('string_col')
.aggregate([table.double_col.summary().prefix('double_'),
table.float_col.summary().prefix('float_'),
table.string_col.summary().suffix('_string')]))
result = expr.execute()
assert isinstance(result, pd.DataFrame)
def test_distinct_array(self):
table = self.alltypes
expr = table.string_col.distinct()
result = self.con.execute(expr)
assert isinstance(result, pd.Series)
def test_decimal_metadata(self):
table = self.con.table('tpch_lineitem')
expr = table.l_quantity
assert expr._precision == 12
assert expr._scale == 2
        # TODO: what if the user's impyla version does not have decimal metadata?
def test_builtins_1(self):
table = self.alltypes
i1 = table.tinyint_col
i4 = table.int_col
i8 = table.bigint_col
d = table.double_col
s = table.string_col
exprs = [
api.now(),
api.e,
# hash functions
i4.hash(),
d.hash(),
s.hash(),
# modulus cases
i1 % 5,
i4 % 10,
20 % i1,
d % 5,
i1.zeroifnull(),
i4.zeroifnull(),
i8.zeroifnull(),
i4.to_timestamp('s'),
i4.to_timestamp('ms'),
i4.to_timestamp('us'),
i8.to_timestamp(),
d.abs(),
d.cast('decimal(12, 2)'),
d.cast('int32'),
d.ceil(),
d.exp(),
d.isnull(),
d.fillna(0),
d.floor(),
d.log(),
d.ln(),
d.log2(),
d.log10(),
d.notnull(),
d.zeroifnull(),
d.nullifzero(),
d.round(),
d.round(2),
d.round(i1),
i1.sign(),
i4.sign(),
d.sign(),
# conv
i1.convert_base(10, 2),
i4.convert_base(10, 2),
i8.convert_base(10, 2),
s.convert_base(10, 2),
d.sqrt(),
d.zeroifnull(),
# nullif cases
5 / i1.nullif(0),
5 / i1.nullif(i4),
5 / i4.nullif(0),
5 / d.nullif(0),
api.literal(5).isin([i1, i4, d]),
# tier and histogram
d.bucket([0, 10, 25, 50, 100]),
d.bucket([0, 10, 25, 50], include_over=True),
d.bucket([0, 10, 25, 50], include_over=True, close_extreme=False),
d.bucket([10, 25, 50, 100], include_under=True),
d.histogram(10),
d.histogram(5, base=10),
d.histogram(base=10, binwidth=5),
# coalesce-like cases
api.coalesce(table.int_col,
api.null(),
table.smallint_col,
table.bigint_col, 5),
api.greatest(table.float_col,
table.double_col, 5),
api.least(table.string_col, 'foo'),
# string stuff
s.contains('6'),
s.like('6%'),
            s.re_search(r'[\d]+'),
            s.re_extract(r'[\d]+', 0),
            s.re_replace(r'[\d]+', 'a'),
s.repeat(2),
s.translate("a", "b"),
s.find("a"),
s.lpad(10, 'a'),
s.rpad(10, 'a'),
s.find_in_set(["a"]),
s.lower(),
s.upper(),
s.reverse(),
s.ascii_str(),
s.length(),
s.strip(),
s.lstrip(),
s.strip(),
# strings with int expr inputs
s.left(i1),
s.right(i1),
s.substr(i1, i1 + 2),
s.repeat(i1)
]
proj_exprs = [expr.name('e%d' % i)
for i, expr in enumerate(exprs)]
projection = table[proj_exprs]
projection.limit(10).execute()
self._check_impala_output_types_match(projection)
def _check_impala_output_types_match(self, table):
query = to_sql(table)
t = self.con.sql(query)
def _clean_type(x):
if isinstance(x, Category):
x = x.to_integer_type()
return x
left, right = t.schema(), table.schema()
for i, (n, l, r) in enumerate(zip(left.names, left.types,
right.types)):
l = _clean_type(l)
r = _clean_type(r)
if l != r:
pytest.fail('Value for {0} had left type {1}'
' and right type {2}'.format(n, l, r))
def assert_cases_equality(self, cases):
for expr, expected in cases:
result = self.con.execute(expr)
assert result == expected, to_sql(expr)
def test_int_builtins(self):
i8 = L(50)
i32 = L(50000)
mod_cases = [
(i8 % 5, 0),
(i32 % 10, 0),
(250 % i8, 0),
]
nullif_cases = [
(5 / i8.nullif(0), 0.1),
(5 / i8.nullif(i32), 0.1),
(5 / i32.nullif(0), 0.0001),
(i32.zeroifnull(), 50000),
]
self.assert_cases_equality(mod_cases + nullif_cases)
def test_column_types(self):
df = self.alltypes.execute()
assert df.tinyint_col.dtype.name == 'int8'
assert df.smallint_col.dtype.name == 'int16'
assert df.int_col.dtype.name == 'int32'
assert df.bigint_col.dtype.name == 'int64'
assert df.float_col.dtype.name == 'float32'
assert df.double_col.dtype.name == 'float64'
assert pd.core.common.is_datetime64_dtype(df.timestamp_col.dtype)
def test_timestamp_builtins(self):
i32 = L(50000)
i64 = L(5 * 10 ** 8)
stamp = ibis.timestamp('2009-05-17 12:34:56')
timestamp_cases = [
(i32.to_timestamp('s'), pd.to_datetime(50000, unit='s')),
(i32.to_timestamp('ms'), pd.to_datetime(50000, unit='ms')),
(i64.to_timestamp(), pd.to_datetime(5 * 10 ** 8, unit='s')),
(stamp.truncate('y'), pd.Timestamp('2009-01-01')),
(stamp.truncate('m'), pd.Timestamp('2009-05-01')),
(stamp.truncate('d'), pd.Timestamp('2009-05-17')),
(stamp.truncate('h'), pd.Timestamp('2009-05-17 12:00')),
(stamp.truncate('minute'), pd.Timestamp('2009-05-17 12:34'))
]
self.assert_cases_equality(timestamp_cases)
def test_decimal_builtins(self):
d = L(5.245)
general_cases = [
(L(-5).abs(), 5),
(d.cast('int32'), 5),
(d.ceil(), 6),
(d.isnull(), False),
(d.floor(), 5),
(d.notnull(), True),
(d.round(), 5),
(d.round(2), Decimal('5.25')),
(d.sign(), 1),
]
self.assert_cases_equality(general_cases)
def test_decimal_builtins_2(self):
d = L('5.245')
dc = d.cast('decimal(12,5)')
cases = [
(dc % 5, Decimal('0.245')),
(dc.fillna(0), Decimal('5.245')),
(dc.exp(), 189.6158),
(dc.log(), 1.65728),
(dc.log2(), 2.39094),
(dc.log10(), 0.71975),
(dc.sqrt(), 2.29019),
(dc.zeroifnull(), Decimal('5.245')),
(-dc, Decimal('-5.245'))
]
for expr, expected in cases:
result = self.con.execute(expr)
if isinstance(expected, Decimal):
tol = Decimal('0.0001')
else:
tol = 0.0001
approx_equal(result, expected, tol)
def test_string_functions(self):
string = L('abcd')
strip_string = L(' a ')
cases = [
(string.length(), 4),
(L('ABCD').lower(), 'abcd'),
(string.upper(), 'ABCD'),
(string.reverse(), 'dcba'),
(string.ascii_str(), 97),
(strip_string.strip(), 'a'),
(strip_string.lstrip(), 'a '),
(strip_string.rstrip(), ' a'),
(string.capitalize(), 'Abcd'),
(string.substr(0, 2), 'ab'),
(string.left(2), 'ab'),
(string.right(2), 'cd'),
(string.repeat(2), 'abcdabcd'),
# global replace not available in Impala yet
# (L('aabbaabbaa').replace('bb', 'B'), 'aaBaaBaa'),
(L('0123').translate('012', 'abc'), 'abc3'),
(string.find('a'), 0),
(L('baaaab').find('b', 2), 5),
(string.lpad(1, '-'), 'a'),
(string.lpad(5), ' abcd'),
(string.rpad(1, '-'), 'a'),
(string.rpad(5), 'abcd '),
(string.find_in_set(['a', 'b', 'abcd']), 2),
(L(', ').join(['a', 'b']), 'a, b'),
(string.like('a%'), True),
(string.re_search('[a-z]'), True),
(string.re_extract('[a-z]', 0), 'a'),
(string.re_replace('(b)', '2'), 'a2cd'),
]
self._check_cases(cases)
def _check_cases(self, cases):
for expr, expected in cases:
result = self.con.execute(expr)
assert result == expected
def test_parse_url(self):
cases = [
(L("https://www.cloudera.com").parse_url('HOST'),
"www.cloudera.com"),
(L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10')
.parse_url('QUERY', 'v'),
'kEuEcWfewf8'),
]
self._check_cases(cases)
def test_div_floordiv(self):
cases = [
(L(7) / 2, 3.5),
(L(7) // 2, 3),
(L(7).floordiv(2), 3),
(L(2).rfloordiv(7), 3),
]
for expr, expected in cases:
result = self.con.execute(expr)
assert result == expected
def test_filter_predicates(self):
t = self.con.table('tpch_nation')
predicates = [
lambda x: x.n_name.lower().like('%ge%'),
lambda x: x.n_name.lower().contains('ge'),
lambda x: x.n_name.lower().rlike('.*ge.*')
]
expr = t
for pred in predicates:
expr = expr[pred(expr)].projection([expr])
expr.execute()
def test_histogram_value_counts(self):
t = self.alltypes
expr = t.double_col.histogram(10).value_counts()
expr.execute()
def test_casted_expr_impala_bug(self):
# Per GH #396. Prior to Impala 2.3.0, there was a bug in the query
# planner that caused this expression to fail
expr = self.alltypes.string_col.cast('double').value_counts()
expr.execute()
def test_decimal_timestamp_builtins(self):
table = self.con.table('tpch_lineitem')
dc = table.l_quantity
ts = table.l_receiptdate.cast('timestamp')
exprs = [
dc % 10,
dc + 5,
dc + dc,
dc / 2,
dc * 2,
dc ** 2,
dc.cast('double'),
api.where(table.l_discount > 0,
dc * table.l_discount, api.NA),
dc.fillna(0),
ts < (ibis.now() + ibis.month(3)),
ts < (ibis.timestamp('2005-01-01') + ibis.month(3)),
# hashing
dc.hash(),
ts.hash(),
# truncate
ts.truncate('y'),
ts.truncate('q'),
ts.truncate('month'),
ts.truncate('d'),
ts.truncate('w'),
ts.truncate('h'),
ts.truncate('minute'),
]
timestamp_fields = ['year', 'month', 'day', 'hour', 'minute',
'second', 'millisecond', 'microsecond',
'week']
for field in timestamp_fields:
if hasattr(ts, field):
exprs.append(getattr(ts, field)())
offset = getattr(ibis, field)(2)
exprs.append(ts + offset)
exprs.append(ts - offset)
proj_exprs = [expr.name('e%d' % i)
for i, expr in enumerate(exprs)]
projection = table[proj_exprs].limit(10)
projection.execute()
def test_timestamp_scalar_in_filter(self):
# #310
table = self.alltypes
expr = (table.filter([table.timestamp_col <
(ibis.timestamp('2010-01-01') + ibis.month(3)),
table.timestamp_col < (ibis.now() + ibis.day(10))
])
.count())
expr.execute()
def test_aggregations(self):
table = self.alltypes.limit(100)
d = table.double_col
s = table.string_col
cond = table.string_col.isin(['1', '7'])
exprs = [
table.bool_col.count(),
d.sum(),
d.mean(),
d.min(),
d.max(),
s.approx_nunique(),
d.approx_median(),
s.group_concat(),
d.std(),
d.std(how='pop'),
d.var(),
d.var(how='pop'),
table.bool_col.any(),
table.bool_col.notany(),
-table.bool_col.any(),
table.bool_col.all(),
table.bool_col.notall(),
-table.bool_col.all(),
table.bool_col.count(where=cond),
d.sum(where=cond),
d.mean(where=cond),
d.min(where=cond),
d.max(where=cond),
d.std(where=cond),
d.var(where=cond),
]
agg_exprs = [expr.name('e%d' % i)
for i, expr in enumerate(exprs)]
agged_table = table.aggregate(agg_exprs)
agged_table.execute()
def test_analytic_functions(self):
t = self.alltypes.limit(1000)
g = t.group_by('string_col').order_by('double_col')
f = t.float_col
exprs = [
f.lag(),
f.lead(),
f.rank(),
f.dense_rank(),
f.first(),
f.last(),
f.first().over(ibis.window(preceding=10)),
f.first().over(ibis.window(following=10)),
ibis.row_number(),
f.cumsum(),
f.cummean(),
f.cummin(),
f.cummax(),
# boolean cumulative reductions
(f == 0).cumany(),
(f == 0).cumall(),
f.sum(),
f.mean(),
f.min(),
f.max()
]
proj_exprs = [expr.name('e%d' % i)
for i, expr in enumerate(exprs)]
proj_table = g.mutate(proj_exprs)
proj_table.execute()
def test_anti_join_self_reference_works(self):
case = self._case_self_reference_limit_exists()
self.con.explain(case)
def test_tpch_self_join_failure(self):
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
customer = self.con.table('tpch_customer')
orders = self.con.table('tpch_orders')
fields_of_interest = [
region.r_name.name('region'),
nation.n_name.name('nation'),
orders.o_totalprice.name('amount'),
orders.o_orderdate.cast('timestamp').name('odate')]
joined_all = (
region.join(nation, region.r_regionkey == nation.n_regionkey)
.join(customer, customer.c_nationkey == nation.n_nationkey)
.join(orders, orders.o_custkey == customer.c_custkey)
[fields_of_interest])
year = joined_all.odate.year().name('year')
total = joined_all.amount.sum().cast('double').name('total')
annual_amounts = (joined_all
.group_by(['region', year])
.aggregate(total))
current = annual_amounts
prior = annual_amounts.view()
yoy_change = (current.total - prior.total).name('yoy_change')
yoy = (current.join(prior, ((current.region == prior.region) &
(current.year == (prior.year - 1))))
[current.region, current.year, yoy_change])
# no analysis failure
self.con.explain(yoy)
def test_tpch_correlated_subquery_failure(self):
# #183 and other issues
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
customer = self.con.table('tpch_customer')
orders = self.con.table('tpch_orders')
fields_of_interest = [customer,
region.r_name.name('region'),
orders.o_totalprice.name('amount'),
orders.o_orderdate
.cast('timestamp').name('odate')]
tpch = (region.join(nation, region.r_regionkey == nation.n_regionkey)
.join(customer, customer.c_nationkey == nation.n_nationkey)
.join(orders, orders.o_custkey == customer.c_custkey)
[fields_of_interest])
t2 = tpch.view()
conditional_avg = t2[(t2.region == tpch.region)].amount.mean()
amount_filter = tpch.amount > conditional_avg
expr = tpch[amount_filter].limit(0)
self.con.explain(expr)
def test_non_equijoin(self):
t = self.con.table('functional_alltypes').limit(100)
t2 = t.view()
expr = t.join(t2, t.tinyint_col < t2.timestamp_col.minute()).count()
# it works
expr.execute()
def test_char_varchar_types(self):
sql = """\
SELECT CAST(string_col AS varchar(20)) AS varchar_col,
CAST(string_col AS CHAR(5)) AS char_col
FROM functional_alltypes"""
t = self.con.sql(sql)
assert isinstance(t.varchar_col, api.StringArray)
assert isinstance(t.char_col, api.StringArray)
|
|
from datetime import datetime, timedelta
from xml.dom import minidom
import logging
from django.utils.timezone import now, utc
from celery.task import task
from celery.task.sets import subtask
from gargoyle import gargoyle
from eve_proxy.exceptions import *
from eve_proxy.models import CachedDocument
from eve_api.api_exceptions import *
from eve_api.models import EVEPlayerCorporation, EVEPlayerCharacter, EVEPlayerCharacterRole, EVEPlayerCharacterSkill, EVESkill, EVEAccount, EVEPlayerCharacterEmploymentHistory
from eve_api.app_defines import *
from eve_api.utils import basic_xml_parse, basic_xml_parse_doc
@task()
def import_eve_character(character_id, key_id=None, callback=None, **kwargs):
"""
Imports a character from the API, providing a API key will populate
further details. Returns a single EVEPlayerCharacter object
"""
log = import_eve_character.get_logger()
try:
pchar = import_eve_character_func(character_id, key_id, log)
except APIAccessException, exc:
log.debug('Error importing character - flagging for retry')
import_eve_character.retry(args=[character_id, key_id, callback], exc=exc, kwargs=kwargs)
if not pchar:
log.debug('Error importing character %s' % character_id)
else:
if callback:
subtask(callback).delay(character=pchar.id)
else:
return pchar
@task()
def import_eve_characters(character_list, key_id=None, callback=None, **kwargs):
"""
Imports characters from the API, providing a API key will populate
further details. Returns a list of EVEPlayerCharacter objects
"""
log = import_eve_characters.get_logger()
try:
results = [import_eve_character_func(char, key_id, log) for char in character_list]
except APIAccessException, exc:
log.debug('Error importing characters - flagging for retry')
import_eve_characters.retry(args=[character_list, key_id, callback], exc=exc, kwargs=kwargs)
if callback:
subtask(callback).delay(characters=results)
else:
return results
def import_eve_character_func(character_id, key_id=None, logger=logging.getLogger(__name__)):
    if 3000000 <= int(character_id) < 4000000:
        # Character IDs in this range belong to NPC characters; no API data
        # is available for them, so just make sure a stub record exists
pchar, created = EVEPlayerCharacter.objects.get_or_create(pk=character_id)
return pchar
try:
char_doc = CachedDocument.objects.api_query('/eve/CharacterInfo.xml.aspx', params={'characterID': character_id}, no_cache=False)
except DocumentRetrievalError, exc:
logger.debug('Error retrieving CharacterInfo.xml.aspx for Character ID %s - %s' % (character_id, exc))
raise APIAccessException('Error retrieving CharacterInfo.xml.aspx for Character ID %s - %s' % (character_id, exc))
d = basic_xml_parse_doc(char_doc)['eveapi']
if 'error' in d:
        logger.debug('EVE API error encountered in API document')
return
values = d['result']
pchar, created = EVEPlayerCharacter.objects.get_or_create(id=character_id)
    # Set the character's name, avoiding oddities in the XML feed
    # (the parser can return an empty dict instead of a string)
    if values['characterName'] != {}:
pchar.name = values['characterName']
else:
pchar.name = ""
pchar.security_status = values['securityStatus']
# Set corporation and join date
corp, created = EVEPlayerCorporation.objects.get_or_create(pk=values['corporationID'])
from eve_api.tasks.corporation import import_corp_details
if created or not corp.name or corp.api_last_updated < (now() - timedelta(hours=12)):
import_corp_details.delay(values['corporationID'])
pchar.corporation = corp
pchar.corporation_date = datetime.strptime(values['corporationDate'], "%Y-%m-%d %H:%M:%S").replace(tzinfo=utc)
    # Derive the race value from the API_RACES_CHOICES mapping
for v in API_RACES_CHOICES:
val, race = v
if race == values['race']:
pchar.race = val
break
    # Import employment history if it's made available
if 'employmentHistory' in values:
reclist = pchar.employmenthistory.values_list('pk', flat=True)
for emp in values['employmentHistory']:
if not emp['recordID'] in reclist:
corp, created = EVEPlayerCorporation.objects.get_or_create(pk=emp['corporationID'])
if created:
import_corp_details.delay(emp['corporationID'])
startdate = datetime.strptime(emp['startDate'], "%Y-%m-%d %H:%M:%S").replace(tzinfo=utc)
eobj, created = EVEPlayerCharacterEmploymentHistory.objects.get_or_create(pk=emp['recordID'], corporation=corp, character=pchar, start_date=startdate)
# We've been passed a Key ID, try and work with it
if key_id:
try:
acc = EVEAccount.objects.get(pk=key_id)
except EVEAccount.DoesNotExist:
acc = None
else:
acc = None
# If we have a key, call CharSheet
if acc and acc.has_access(3) and not acc.api_keytype == API_KEYTYPE_CORPORATION:
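        # CAK-style keys authenticate with keyid/vcode; legacy keys use userID/apiKey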
if gargoyle.is_active('eve-cak') and acc.is_cak:
auth_params = {'keyid': acc.api_user_id, 'vcode': acc.api_key, 'characterid': character_id }
else:
auth_params = {'userID': acc.api_user_id, 'apiKey': acc.api_key, 'characterID': character_id }
try:
char_doc = CachedDocument.objects.api_query('/char/CharacterSheet.xml.aspx', params=auth_params, no_cache=False)
except DocumentRetrievalError, exc:
logger.debug('Error retrieving CharacterSheet.xml.aspx for User ID %s, Character ID %s - %s' % (acc.pk, character_id, exc))
raise APIAccessException('Error retrieving CharacterSheet.xml.aspx for User ID %s, Character ID %s - %s' % (acc.pk, character_id, exc.value))
doc = basic_xml_parse_doc(char_doc)['eveapi']
        if 'error' not in doc:
values = doc['result']
pchar.name = values['name']
pchar.balance = values['balance']
pchar.attrib_intelligence = values['attributes']['intelligence']
pchar.attrib_charisma = values['attributes']['charisma']
pchar.attrib_perception = values['attributes']['perception']
pchar.attrib_willpower = values['attributes']['willpower']
pchar.attrib_memory = values['attributes']['memory']
# Process the character's skills
pchar.total_sp = 0
            for skill in values.get('skills', []):
skillobj, created = EVESkill.objects.get_or_create(pk=skill['typeID'])
charskillobj, created = EVEPlayerCharacterSkill.objects.get_or_create(skill=skillobj, character=pchar)
                if (created or charskillobj.level != int(skill['level']) or
                        charskillobj.skillpoints != int(skill['skillpoints'])):
charskillobj.level = int(skill['level'])
charskillobj.skillpoints = int(skill['skillpoints'])
charskillobj.save()
pchar.total_sp = pchar.total_sp + int(skill['skillpoints'])
if acc.has_access(18):
try:
skillqueue = CachedDocument.objects.api_query('/char/SkillInTraining.xml.aspx', params=auth_params, no_cache=False)
except DocumentRetrievalError, exc:
logger.debug('Error retrieving SkillInTraining.xml.aspx for User ID %s, Character ID %s - %s' % (key_id, character_id, exc))
else:
queuedoc = basic_xml_parse_doc(skillqueue)
if not 'error' in queuedoc['eveapi'] and 'result' in queuedoc['eveapi']:
queuedoc = queuedoc['eveapi']['result']
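                        # clear any stale in-training flags before marking the skill currently training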
EVEPlayerCharacterSkill.objects.filter(character=pchar).update(in_training=0)
if int(queuedoc['skillInTraining']):
skillobj, created = EVESkill.objects.get_or_create(pk=queuedoc['trainingTypeID'])
charskillobj, created = EVEPlayerCharacterSkill.objects.get_or_create(skill=skillobj, character=pchar)
charskillobj.in_training = queuedoc['trainingToLevel']
charskillobj.save()
else:
EVEPlayerCharacterSkill.objects.filter(character=pchar).update(in_training=0)
# Process the character's roles
pchar.roles.clear()
roles = values.get('corporationRoles', None)
if roles and len(roles):
for r in roles:
role, created = EVEPlayerCharacterRole.objects.get_or_create(roleid=r['roleID'], name=r['roleName'])
pchar.roles.add(role)
if values['gender'] == 'Male':
pchar.gender = API_GENDER_MALE
else:
pchar.gender = API_GENDER_FEMALE
pchar.api_last_updated = now()
pchar.save()
if acc:
if not pchar.id in acc.characters.all().values_list('id', flat=True):
acc.characters.add(pchar)
if pchar.director and acc.api_keytype in [API_KEYTYPE_FULL, API_KEYTYPE_CORPORATION]:
from eve_api.tasks.corporation import import_corp_members
import_corp_members.delay(key_id=acc.pk, character_id=pchar.id)
return pchar
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes + cinder.
"""
import copy
import sys
from cinderclient import exceptions as cinder_exception
from cinderclient import service_catalog
from cinderclient.v1 import client as cinder_client
from oslo.config import cfg
from nova.db import base
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
cinder_opts = [
cfg.StrOpt('cinder_catalog_info',
default='volume:cinder:publicURL',
help='Info to match when looking for cinder in the service '
'catalog. Format is: separated values of the form: '
'<service_type>:<service_name>:<endpoint_type>'),
cfg.StrOpt('cinder_endpoint_template',
help='Override service catalog lookup with template for cinder '
'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
cfg.StrOpt('os_region_name',
help='Region name of this node'),
cfg.StrOpt('cinder_ca_certificates_file',
help='Location of ca certificates file to use for cinder '
'client requests.'),
cfg.IntOpt('cinder_http_retries',
default=3,
help='Number of cinderclient retries on failed http calls'),
cfg.BoolOpt('cinder_api_insecure',
default=False,
                help='Allow insecure SSL requests to cinder'),
cfg.BoolOpt('cinder_cross_az_attach',
default=True,
help='Allow attach between instance and volume in different '
'availability zones.'),
]
CONF = cfg.CONF
CONF.register_opts(cinder_opts)
LOG = logging.getLogger(__name__)
def cinderclient(context):
# FIXME: the cinderclient ServiceCatalog object is mis-named.
# It actually contains the entire access blob.
# Only needed parts of the service catalog are passed in, see
# nova/context.py.
compat_catalog = {
'access': {'serviceCatalog': context.service_catalog or []}
}
sc = service_catalog.ServiceCatalog(compat_catalog)
if CONF.cinder_endpoint_template:
url = CONF.cinder_endpoint_template % context.to_dict()
else:
info = CONF.cinder_catalog_info
service_type, service_name, endpoint_type = info.split(':')
# extract the region if set in configuration
if CONF.os_region_name:
attr = 'region'
filter_value = CONF.os_region_name
else:
attr = None
filter_value = None
url = sc.url_for(attr=attr,
filter_value=filter_value,
service_type=service_type,
service_name=service_name,
endpoint_type=endpoint_type)
LOG.debug(_('Cinderclient connection created using URL: %s') % url)
c = cinder_client.Client(context.user_id,
context.auth_token,
project_id=context.project_id,
auth_url=url,
insecure=CONF.cinder_api_insecure,
retries=CONF.cinder_http_retries,
cacert=CONF.cinder_ca_certificates_file)
# noauth extracts user_id:project_id from auth_token
c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
context.project_id)
c.client.management_url = url
return c
def _untranslate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol.id
d['status'] = vol.status
d['size'] = vol.size
d['availability_zone'] = vol.availability_zone
d['created_at'] = vol.created_at
# TODO(jdg): The calling code expects attach_time and
# mountpoint to be set. When the calling
# code is more defensive this can be
# removed.
d['attach_time'] = ""
d['mountpoint'] = ""
if vol.attachments:
att = vol.attachments[0]
d['attach_status'] = 'attached'
d['instance_uuid'] = att['server_id']
d['mountpoint'] = att['device']
else:
d['attach_status'] = 'detached'
d['display_name'] = vol.display_name
d['display_description'] = vol.display_description
# TODO(jdg): Information may be lost in this translation
d['volume_type_id'] = vol.volume_type
d['snapshot_id'] = vol.snapshot_id
d['volume_metadata'] = {}
for key, value in vol.metadata.items():
d['volume_metadata'][key] = value
if hasattr(vol, 'volume_image_metadata'):
d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata)
return d
def _untranslate_snapshot_summary_view(context, snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot.id
d['status'] = snapshot.status
d['progress'] = snapshot.progress
d['size'] = snapshot.size
d['created_at'] = snapshot.created_at
d['display_name'] = snapshot.display_name
d['display_description'] = snapshot.display_description
d['volume_id'] = snapshot.volume_id
d['project_id'] = snapshot.project_id
d['volume_size'] = snapshot.size
return d
def translate_volume_exception(method):
"""Transforms the exception for the volume but keeps its traceback intact.
"""
def wrapper(self, ctx, volume_id, *args, **kwargs):
try:
res = method(self, ctx, volume_id, *args, **kwargs)
except cinder_exception.ClientException:
exc_type, exc_value, exc_trace = sys.exc_info()
if isinstance(exc_value, cinder_exception.NotFound):
exc_value = exception.VolumeNotFound(volume_id=volume_id)
elif isinstance(exc_value, cinder_exception.BadRequest):
exc_value = exception.InvalidInput(reason=exc_value.message)
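            # three-expression raise (Python 2) re-raises the translated exception with the original traceback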
raise exc_value, None, exc_trace
return res
return wrapper
def translate_snapshot_exception(method):
"""Transforms the exception for the snapshot but keeps its traceback
intact.
"""
def wrapper(self, ctx, snapshot_id, *args, **kwargs):
try:
res = method(self, ctx, snapshot_id, *args, **kwargs)
except cinder_exception.ClientException:
exc_type, exc_value, exc_trace = sys.exc_info()
if isinstance(exc_value, cinder_exception.NotFound):
exc_value = exception.SnapshotNotFound(snapshot_id=snapshot_id)
raise exc_value, None, exc_trace
return res
return wrapper
class API(base.Base):
"""API for interacting with the volume manager."""
@translate_volume_exception
def get(self, context, volume_id):
item = cinderclient(context).volumes.get(volume_id)
return _untranslate_volume_summary_view(context, item)
    def get_all(self, context, search_opts=None):
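        # NOTE: search_opts is accepted for interface compatibility but is not
        # used here; all volumes are returned in detailed form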
items = cinderclient(context).volumes.list(detailed=True)
rval = []
for item in items:
rval.append(_untranslate_volume_summary_view(context, item))
return rval
def check_attached(self, context, volume):
"""Raise exception if volume in use."""
if volume['status'] != "in-use":
msg = _("status must be 'in-use'")
raise exception.InvalidVolume(reason=msg)
def check_attach(self, context, volume, instance=None):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
msg = _("status must be 'available'")
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
msg = _("already attached")
raise exception.InvalidVolume(reason=msg)
if instance and not CONF.cinder_cross_az_attach:
if instance['availability_zone'] != volume['availability_zone']:
msg = _("Instance and volume not in same availability_zone")
raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] == "available":
msg = _("already detached")
raise exception.InvalidVolume(reason=msg)
@translate_volume_exception
def reserve_volume(self, context, volume_id):
cinderclient(context).volumes.reserve(volume_id)
@translate_volume_exception
def unreserve_volume(self, context, volume_id):
cinderclient(context).volumes.unreserve(volume_id)
@translate_volume_exception
def begin_detaching(self, context, volume_id):
cinderclient(context).volumes.begin_detaching(volume_id)
@translate_volume_exception
def roll_detaching(self, context, volume_id):
cinderclient(context).volumes.roll_detaching(volume_id)
@translate_volume_exception
def attach(self, context, volume_id, instance_uuid, mountpoint):
cinderclient(context).volumes.attach(volume_id, instance_uuid,
mountpoint)
@translate_volume_exception
def detach(self, context, volume_id):
cinderclient(context).volumes.detach(volume_id)
@translate_volume_exception
def initialize_connection(self, context, volume_id, connector):
return cinderclient(context).volumes.initialize_connection(volume_id,
connector)
@translate_volume_exception
def terminate_connection(self, context, volume_id, connector):
return cinderclient(context).volumes.terminate_connection(volume_id,
connector)
def migrate_volume_completion(self, context, old_volume_id, new_volume_id,
error=False):
return cinderclient(context).volumes.migrate_volume_completion(
old_volume_id, new_volume_id, error)
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None):
if snapshot is not None:
snapshot_id = snapshot['id']
else:
snapshot_id = None
kwargs = dict(snapshot_id=snapshot_id,
display_name=name,
display_description=description,
volume_type=volume_type,
user_id=context.user_id,
project_id=context.project_id,
availability_zone=availability_zone,
metadata=metadata,
imageRef=image_id)
try:
item = cinderclient(context).volumes.create(size, **kwargs)
return _untranslate_volume_summary_view(context, item)
except cinder_exception.BadRequest as e:
raise exception.InvalidInput(reason=unicode(e))
@translate_volume_exception
def delete(self, context, volume_id):
cinderclient(context).volumes.delete(volume_id)
@translate_volume_exception
def update(self, context, volume_id, fields):
raise NotImplementedError()
@translate_snapshot_exception
def get_snapshot(self, context, snapshot_id):
item = cinderclient(context).volume_snapshots.get(snapshot_id)
return _untranslate_snapshot_summary_view(context, item)
def get_all_snapshots(self, context):
items = cinderclient(context).volume_snapshots.list(detailed=True)
rvals = []
for item in items:
rvals.append(_untranslate_snapshot_summary_view(context, item))
return rvals
@translate_volume_exception
def create_snapshot(self, context, volume_id, name, description):
item = cinderclient(context).volume_snapshots.create(volume_id,
False,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
@translate_volume_exception
def create_snapshot_force(self, context, volume_id, name, description):
item = cinderclient(context).volume_snapshots.create(volume_id,
True,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
@translate_snapshot_exception
def delete_snapshot(self, context, snapshot_id):
cinderclient(context).volume_snapshots.delete(snapshot_id)
def get_volume_encryption_metadata(self, context, volume_id):
return cinderclient(context).volumes.get_encryption_metadata(volume_id)
@translate_volume_exception
def get_volume_metadata(self, context, volume_id):
raise NotImplementedError()
@translate_volume_exception
def delete_volume_metadata(self, context, volume_id, key):
raise NotImplementedError()
@translate_volume_exception
def update_volume_metadata(self, context, volume_id,
metadata, delete=False):
raise NotImplementedError()
@translate_volume_exception
def get_volume_metadata_value(self, volume_id, key):
raise NotImplementedError()
@translate_snapshot_exception
def update_snapshot_status(self, context, snapshot_id, status):
vs = cinderclient(context).volume_snapshots
# '90%' here is used to tell Cinder that Nova is done
# with its portion of the 'creating' state. This can
# be removed when we are able to split the Cinder states
# into 'creating' and a separate state of
# 'creating_in_nova'. (Same for 'deleting' state.)
vs.update_snapshot_status(
snapshot_id,
{'status': status,
'progress': '90%'}
)
|
|
#!/usr/bin/env python3
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'numpy'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script decides that a pyx file has changed relative to its C file
by comparing hashes stored in a database file.
It is a simple script that invokes Cython (and Tempita) on all .pyx
(.pyx.in) files while waiting for a proper build system, and uses file
hashes to figure out whether a rebuild is needed.
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
import os
import re
import sys
import hashlib
import subprocess
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'numpy'
VENDOR = 'NumPy'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
flags = ['-3', '--fast-fail']
if tofile.endswith('.cxx'):
flags.append('--cplus')
try:
# try the cython in the installed python first (somewhat related to scipy/scipy#2397)
import Cython
from Cython.Compiler.Version import version as cython_version
except ImportError:
# The `cython` command need not point to the version installed in the
# Python running this script, so raise an error to avoid the chance of
# using the wrong version of Cython.
raise OSError('Cython needs to be installed in Python as a module')
else:
# check the version, and invoke through python
from distutils.version import LooseVersion
# Cython 0.29.21 is required for Python 3.9 and there are
# other fixes in the 0.29 series that are needed even for earlier
# Python versions.
# Note: keep in sync with that in pyproject.toml
required_version = LooseVersion('0.29.21')
if LooseVersion(cython_version) < required_version:
cython_path = Cython.__file__
raise RuntimeError(f'Building {VENDOR} requires Cython >= {required_version}'
f', found {cython_version} at {cython_path}')
subprocess.check_call(
[sys.executable, '-m', 'cython'] + flags + ["-o", tofile, fromfile])
def process_tempita_pyx(fromfile, tofile):
import npy_tempita as tempita
assert fromfile.endswith('.pyx.in')
with open(fromfile, "r") as f:
tmpl = f.read()
pyxcontent = tempita.sub(tmpl)
pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
with open(pyxfile, "w") as f:
f.write(pyxcontent)
process_pyx(pyxfile, tofile)
def process_tempita_pyd(fromfile, tofile):
import npy_tempita as tempita
    assert fromfile.endswith('.pyd.in')
    assert tofile.endswith('.pyd')
with open(fromfile, "r") as f:
tmpl = f.read()
pyxcontent = tempita.sub(tmpl)
with open(tofile, "w") as f:
f.write(pyxcontent)
def process_tempita_pxi(fromfile, tofile):
import npy_tempita as tempita
assert fromfile.endswith('.pxi.in')
assert tofile.endswith('.pxi')
with open(fromfile, "r") as f:
tmpl = f.read()
pyxcontent = tempita.sub(tmpl)
with open(tofile, "w") as f:
f.write(pyxcontent)
def process_tempita_pxd(fromfile, tofile):
import npy_tempita as tempita
assert fromfile.endswith('.pxd.in')
assert tofile.endswith('.pxd')
with open(fromfile, "r") as f:
tmpl = f.read()
pyxcontent = tempita.sub(tmpl)
with open(tofile, "w") as f:
f.write(pyxcontent)
rules = {
# fromext : function, toext
'.pyx' : (process_pyx, '.c'),
'.pyx.in' : (process_tempita_pyx, '.c'),
'.pxi.in' : (process_tempita_pxi, '.pxi'),
'.pxd.in' : (process_tempita_pxd, '.pxd'),
'.pyd.in' : (process_tempita_pyd, '.pyd'),
}
#
# Hash db
#
def load_hashes(filename):
# Return { filename : (sha1 of input, sha1 of output) }
if os.path.isfile(filename):
hashes = {}
with open(filename, 'r') as f:
for line in f:
filename, inhash, outhash = line.split()
hashes[filename] = (inhash, outhash)
else:
hashes = {}
return hashes
def save_hashes(hash_db, filename):
with open(filename, 'w') as f:
for key, value in sorted(hash_db.items()):
f.write("%s %s %s\n" % (key, value[0], value[1]))
def sha1_of_file(filename):
h = hashlib.sha1()
with open(filename, "rb") as f:
h.update(f.read())
return h.hexdigest()
#
# Main program
#
def normpath(path):
path = path.replace(os.sep, '/')
if path.startswith('./'):
path = path[2:]
return path
def get_hash(frompath, topath):
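    # return the (source hash, target hash) pair; a mismatch with the stored
    # pair on either side triggers a rebuild in process()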
from_hash = sha1_of_file(frompath)
to_hash = sha1_of_file(topath) if os.path.exists(topath) else None
return (from_hash, to_hash)
def process(path, fromfile, tofile, processor_function, hash_db):
fullfrompath = os.path.join(path, fromfile)
fulltopath = os.path.join(path, tofile)
current_hash = get_hash(fullfrompath, fulltopath)
if current_hash == hash_db.get(normpath(fullfrompath), None):
print(f'{fullfrompath} has not changed')
return
orig_cwd = os.getcwd()
try:
os.chdir(path)
print(f'Processing {fullfrompath}')
processor_function(fromfile, tofile)
finally:
os.chdir(orig_cwd)
# changed target file, recompute hash
current_hash = get_hash(fullfrompath, fulltopath)
# store hash in db
hash_db[normpath(fullfrompath)] = current_hash
def find_process_files(root_dir):
hash_db = load_hashes(HASH_FILE)
    files = [x for x in os.listdir(root_dir)
             if not os.path.isdir(os.path.join(root_dir, x))]
# .pxi or .pxi.in files are most likely dependencies for
# .pyx files, so we need to process them first
files.sort(key=lambda name: (name.endswith('.pxi') or
name.endswith('.pxi.in') or
name.endswith('.pxd.in')),
reverse=True)
for filename in files:
in_file = os.path.join(root_dir, filename + ".in")
for fromext, value in rules.items():
if filename.endswith(fromext):
if not value:
break
function, toext = value
if toext == '.c':
with open(os.path.join(root_dir, filename), 'rb') as f:
data = f.read()
m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
if m:
toext = ".cxx"
fromfile = filename
tofile = filename[:-len(fromext)] + toext
process(root_dir, fromfile, tofile, function, hash_db)
save_hashes(hash_db, HASH_FILE)
break
def main():
try:
root_dir = sys.argv[1]
except IndexError:
root_dir = DEFAULT_ROOT
find_process_files(root_dir)
if __name__ == '__main__':
main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conv_utils."""
import tensorflow.compat.v2 as tf
import itertools
from absl.testing import parameterized
import numpy as np
from keras.utils import conv_utils
def _get_const_output_shape(input_shape, dim):
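  # Clamp every dimension of input_shape to at most `dim`; presumably a
  # helper for parameterized conv tests later in this module.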
return tuple([min(d, dim) for d in input_shape])
input_shapes = [
(0,),
(0, 0),
(1,),
(2,),
(3,),
(1, 0),
(0, 3),
(1, 1),
(1, 2),
(3, 1),
(2, 2),
(3, 3),
(1, 0, 1),
(5, 2, 3),
(3, 5, 6, 7, 0),
(3, 2, 2, 4, 4),
(1, 2, 3, 4, 7, 2),
]
class TestBasicConvUtilsTest(tf.test.TestCase):
def test_convert_data_format(self):
self.assertEqual('NCDHW', conv_utils.convert_data_format(
'channels_first', 5))
self.assertEqual('NCHW', conv_utils.convert_data_format(
'channels_first', 4))
self.assertEqual('NCW', conv_utils.convert_data_format('channels_first', 3))
self.assertEqual('NHWC', conv_utils.convert_data_format('channels_last', 4))
self.assertEqual('NWC', conv_utils.convert_data_format('channels_last', 3))
self.assertEqual('NDHWC', conv_utils.convert_data_format(
'channels_last', 5))
with self.assertRaises(ValueError):
conv_utils.convert_data_format('invalid', 2)
def test_normalize_tuple(self):
self.assertEqual(
(2, 2, 2),
conv_utils.normalize_tuple(2, n=3, name='strides', allow_zero=True))
self.assertEqual((2, 1, 2),
conv_utils.normalize_tuple((2, 1, 2),
n=3,
name='strides',
allow_zero=True))
self.assertEqual((
1,
2,
3,
), conv_utils.normalize_tuple((1, 2, 3), n=3, name='pool_size'))
self.assertEqual((3, 3, 3),
conv_utils.normalize_tuple(3, n=3, name='pool_size'))
with self.assertRaisesRegex(
ValueError,
r'including \{-1\} that does not satisfy the requirement `> 0`'):
conv_utils.normalize_tuple((3, -1, 3), n=3, name='negative_size')
with self.assertRaisesRegex(
ValueError,
r'The `strides` argument .* a tuple of 3 integers.* \(2, 1\)$'):
conv_utils.normalize_tuple((2, 1), n=3, name='strides', allow_zero=True)
with self.assertRaisesRegex(
ValueError,
r'The `kernel_size` argument .* tuple of 3 integers.* None$'):
conv_utils.normalize_tuple(None, n=3, name='kernel_size')
with self.assertRaisesRegex(ValueError,
r'including \{-4\} that does not .* `>= 0`'):
conv_utils.normalize_tuple(-4, n=3, name='strides', allow_zero=True)
with self.assertRaisesRegex(ValueError,
r'including \{0\} that does not .* `> 0`'):
conv_utils.normalize_tuple((0, 1, 2), n=3, name='pool_size')
def test_normalize_data_format(self):
self.assertEqual('channels_last',
conv_utils.normalize_data_format('Channels_Last'))
self.assertEqual('channels_first',
conv_utils.normalize_data_format('CHANNELS_FIRST'))
with self.assertRaises(ValueError):
conv_utils.normalize_data_format('invalid')
def test_normalize_padding(self):
self.assertEqual('same', conv_utils.normalize_padding('SAME'))
self.assertEqual('valid', conv_utils.normalize_padding('VALID'))
with self.assertRaises(ValueError):
conv_utils.normalize_padding('invalid')
def test_conv_output_length(self):
self.assertEqual(4, conv_utils.conv_output_length(4, 2, 'same', 1, 1))
self.assertEqual(2, conv_utils.conv_output_length(4, 2, 'same', 2, 1))
self.assertEqual(3, conv_utils.conv_output_length(4, 2, 'valid', 1, 1))
self.assertEqual(2, conv_utils.conv_output_length(4, 2, 'valid', 2, 1))
self.assertEqual(5, conv_utils.conv_output_length(4, 2, 'full', 1, 1))
self.assertEqual(3, conv_utils.conv_output_length(4, 2, 'full', 2, 1))
self.assertEqual(2, conv_utils.conv_output_length(5, 2, 'valid', 2, 2))
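# For reference (not taken from conv_utils itself, but consistent with the
# assertions above): with dilated_kernel = kernel + (kernel - 1) * (dilation - 1),
# conv_output_length follows the usual formulas
#   'same'  -> ceil(input / stride)
#   'valid' -> ceil((input - dilated_kernel + 1) / stride)
#   'full'  -> ceil((input + dilated_kernel - 1) / stride)
# e.g. conv_output_length(5, 2, 'valid', 2, 2): dilated_kernel = 3,
# ceil((5 - 3 + 1) / 2) = 2, matching the last assertion.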
def test_conv_input_length(self):
self.assertEqual(3, conv_utils.conv_input_length(4, 2, 'same', 1))
self.assertEqual(2, conv_utils.conv_input_length(2, 2, 'same', 2))
self.assertEqual(4, conv_utils.conv_input_length(3, 2, 'valid', 1))
self.assertEqual(4, conv_utils.conv_input_length(2, 2, 'valid', 2))
self.assertEqual(3, conv_utils.conv_input_length(4, 2, 'full', 1))
self.assertEqual(4, conv_utils.conv_input_length(3, 2, 'full', 2))
def test_deconv_output_length(self):
self.assertEqual(4, conv_utils.deconv_output_length(4, 2, 'same', stride=1))
self.assertEqual(8, conv_utils.deconv_output_length(4, 2, 'same', stride=2))
self.assertEqual(5, conv_utils.deconv_output_length(
4, 2, 'valid', stride=1))
self.assertEqual(8, conv_utils.deconv_output_length(
4, 2, 'valid', stride=2))
self.assertEqual(3, conv_utils.deconv_output_length(4, 2, 'full', stride=1))
self.assertEqual(6, conv_utils.deconv_output_length(4, 2, 'full', stride=2))
self.assertEqual(
5,
conv_utils.deconv_output_length(
4, 2, 'same', output_padding=2, stride=1))
self.assertEqual(
7,
conv_utils.deconv_output_length(
4, 2, 'same', output_padding=1, stride=2))
self.assertEqual(
7,
conv_utils.deconv_output_length(
4, 2, 'valid', output_padding=2, stride=1))
self.assertEqual(
9,
conv_utils.deconv_output_length(
4, 2, 'valid', output_padding=1, stride=2))
self.assertEqual(
5,
conv_utils.deconv_output_length(
4, 2, 'full', output_padding=2, stride=1))
self.assertEqual(
7,
conv_utils.deconv_output_length(
4, 2, 'full', output_padding=1, stride=2))
self.assertEqual(
5,
conv_utils.deconv_output_length(
4, 2, 'same', output_padding=1, stride=1, dilation=2))
self.assertEqual(
12,
conv_utils.deconv_output_length(
4, 2, 'valid', output_padding=2, stride=2, dilation=3))
self.assertEqual(
6,
conv_utils.deconv_output_length(
4, 2, 'full', output_padding=2, stride=2, dilation=3))
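# For reference (a sketch of the arithmetic these assertions exercise, not
# quoted from conv_utils): with dilated_kernel = kernel + (kernel - 1) * (dilation - 1),
# deconv_output_length without output_padding gives
#   'same'  -> input * stride
#   'valid' -> input * stride + max(dilated_kernel - stride, 0)
#   'full'  -> input * stride - (stride + dilated_kernel - 2)
# and with output_padding gives
#   (input - 1) * stride + dilated_kernel - 2 * pad + output_padding
# where pad is dilated_kernel // 2 for 'same', 0 for 'valid', and
# dilated_kernel - 1 for 'full'.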
@parameterized.parameters(input_shapes)
class TestConvUtils(tf.test.TestCase, parameterized.TestCase):
def test_conv_kernel_mask_fc(self, *input_shape):
padding = 'valid'
kernel_shape = input_shape
ndims = len(input_shape)
strides = (1,) * ndims
output_shape = _get_const_output_shape(input_shape, dim=1)
mask = np.ones(input_shape + output_shape, bool)
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
padding
)
)
def test_conv_kernel_mask_diag(self, *input_shape):
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = (1,) * ndims
for padding in ['valid', 'same']:
mask = np.identity(int(np.prod(input_shape)), bool)
mask = np.reshape(mask, input_shape * 2)
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
padding
)
)
def test_conv_kernel_mask_full_stride(self, *input_shape):
padding = 'valid'
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = tuple([max(d, 1) for d in input_shape])
output_shape = _get_const_output_shape(input_shape, dim=1)
mask = np.zeros(input_shape + output_shape, bool)
if all(d > 0 for d in mask.shape): # pylint: disable=not-an-iterable
mask[(0,) * len(output_shape)] = True
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
padding
)
)
def test_conv_kernel_mask_almost_full_stride(self, *input_shape):
padding = 'valid'
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = tuple([max(d - 1, 1) for d in input_shape])
output_shape = _get_const_output_shape(input_shape, dim=2)
mask = np.zeros(input_shape + output_shape, bool)
if all(d > 0 for d in mask.shape): # pylint: disable=not-an-iterable
for in_position in itertools.product(*[[0, d - 1] for d in input_shape]):
out_position = tuple([min(p, 1) for p in in_position])
mask[in_position + out_position] = True
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
padding
)
)
def test_conv_kernel_mask_rect_kernel(self, *input_shape):
padding = 'valid'
ndims = len(input_shape)
strides = (1,) * ndims
for d in range(ndims):
kernel_shape = [1] * ndims
kernel_shape[d] = input_shape[d]
output_shape = list(input_shape)
output_shape[d] = min(1, input_shape[d])
mask = np.identity(int(np.prod(input_shape)), bool)
mask = np.reshape(mask, input_shape * 2)
for p in itertools.product(*[range(input_shape[dim])
for dim in range(ndims)]):
p = list(p)
p[d] = slice(None)
mask[tuple(p * 2)] = True
mask = np.take(mask, range(0, min(1, input_shape[d])), ndims + d)
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
padding
)
)
def test_conv_kernel_mask_wrong_padding(self, *input_shape):
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = (1,) * ndims
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
'valid'
)
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
'same'
)
self.assertRaises(NotImplementedError,
conv_utils.conv_kernel_mask,
input_shape, kernel_shape, strides, 'full')
def test_conv_kernel_mask_wrong_dims(self, *input_shape):
kernel_shape = 1
strides = 1
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
'valid'
)
ndims = len(input_shape)
kernel_shape = (2,) * (ndims + 1)
self.assertRaises(ValueError,
conv_utils.conv_kernel_mask,
input_shape, kernel_shape, strides, 'same')
strides = (1,) * ndims
self.assertRaises(ValueError,
conv_utils.conv_kernel_mask,
input_shape, kernel_shape, strides, 'valid')
kernel_shape = (1,) * ndims
strides = (2,) * (ndims - 1)
self.assertRaises(ValueError,
conv_utils.conv_kernel_mask,
input_shape, kernel_shape, strides, 'valid')
strides = (2,) * ndims
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
'valid'
)
if __name__ == '__main__':
tf.test.main()
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Provides a simple Daemon class to ease the process of forking a
python application on POSIX systems.
"""
import errno
import logging
import socket
from logging.handlers import RotatingFileHandler
import os
import signal
import sys
import time
import datetime
from ...server.util.Configuration import Configuration
class Daemon(object):
"""Daemon base class"""
def run(self):
"""Override. We are in the daemon at this point."""
def main(self):
"""Read the command line and either start or stop the daemon"""
if Configuration.action == 'start':
self.start()
elif Configuration.action == 'stop':
self.stop()
elif Configuration.action == 'status':
self.status()
else:
raise ValueError(Configuration.action)
def on_sigterm(self, signalnum, frame):
"""Handle segterm by treating as a keyboard interrupt"""
raise KeyboardInterrupt('SIGTERM')
def add_signal_handlers(self):
"""Register the sigterm handler"""
signal.signal(signal.SIGTERM, self.on_sigterm)
def start(self):
"""Initialize and run the daemon"""
self.check_pid()
self.add_signal_handlers()
self.start_logging()
try:
self.check_pid_writable()
self.check_server_accessibility()
self.daemonize()
except:
logging.exception("failed to start due to an exception")
raise
self.write_pid()
try:
try:
self.run()
except (KeyboardInterrupt, SystemExit):
pass
except OSError as exc:
logging.exception(str(exc))
pass
except:
logging.exception("stopping with an exception")
raise
finally:
self.remove_pid()
def stop(self):
"""Stop the running process"""
if Configuration.pidfile and os.path.exists(Configuration.pidfile):
file = open(Configuration.pidfile)
pid = int(file.read())
file.close()
os.kill(pid, signal.SIGTERM)
for n in range(10):
time.sleep(0.25)
try:
os.kill(pid, 0)
except OSError as why:
if why.errno == errno.ESRCH:
break
else:
raise
else:
sys.exit("pid %d did not die" % pid)
else:
sys.exit("not running")
def status(self):
self.check_pid(True)
def start_logging(self):
"""Configure the logging module"""
handler = RotatingFileHandler(
Configuration.logfile,
maxBytes=Configuration.logmaxmb * 1024 * 1024,
backupCount=Configuration.logbackups)
log = logging.getLogger()
log.setLevel(Configuration.loglevel)
handler.setFormatter(
logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
log.addHandler(handler)
def check_pid(self, status=False):
"""Check the pid file.
Exit via sys.exit() if another instance is already running.
If the pid file exists but no other instance is running,
delete the pid file.
"""
if not Configuration.pidfile:
return
if os.path.exists(Configuration.pidfile):
try:
pid = int(open(Configuration.pidfile, 'rb').read().decode('utf-8').strip())
except ValueError:
msg = 'pidfile %s contains a non-integer value' % Configuration.pidfile
sys.exit(msg)
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# The pid doesn't exist, so remove the stale pidfile.
os.remove(Configuration.pidfile)
else:
msg = ("failed to check status of process %s "
"from pidfile %s: %s" % (pid, Configuration.pidfile, err.strerror))
sys.exit(msg)
else:
mtime = os.stat(Configuration.pidfile).st_mtime
since = datetime.timedelta(seconds=(time.time() - mtime))
msg = 'instance [pid %s] seems to be running since %s [%s days]' % (pid, time.ctime(mtime), since.days)
sys.exit(msg)
elif status:
print('no instance seems to be running')
def check_pid_writable(self):
"""Verify the user has access to write to the pid file.
Note that the eventual process ID isn't known until after
daemonize(), so it's not possible to write the PID here.
"""
if not Configuration.pidfile:
return
if os.path.exists(Configuration.pidfile):
check = Configuration.pidfile
else:
check = os.path.dirname(Configuration.pidfile)
if not os.access(check, os.W_OK):
msg = 'unable to write to pidfile %s' % Configuration.pidfile
sys.exit(msg)
def check_server_accessibility(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((Configuration.host, Configuration.port))
except OSError as exc:
if exc.errno == errno.EADDRINUSE:
print("address [%s:%d] already in use" % (Configuration.host, Configuration.port))
sys.exit(1)
def write_pid(self):
"""Write to the pid file"""
if Configuration.pidfile:
open(Configuration.pidfile, 'wb').write(str(os.getpid()).encode('utf-8'))
def remove_pid(self):
"""Delete the pid file"""
if Configuration.pidfile and os.path.exists(Configuration.pidfile):
os.remove(Configuration.pidfile)
def daemonize(self):
"""Detach from the terminal and continue as a daemon"""
if os.fork(): # launch child and...
os._exit(0) # kill off parent
os.setsid()
if os.fork(): # launch child and...
os._exit(0) # kill off parent again.
os.umask(63) # 077 in octal
null = os.open('/dev/null', os.O_RDWR)
for i in range(3):
try:
os.dup2(null, i)
except OSError as e:
if e.errno != errno.EBADF:
raise
os.close(null)
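# Minimal usage sketch (illustrative): subclass Daemon and override run().
# EchoDaemon and its heartbeat loop are hypothetical; Configuration is assumed
# to be populated elsewhere, as in the rest of this module.
class EchoDaemon(Daemon):
    """Toy daemon that periodically writes a heartbeat to the log."""

    def run(self):
        while True:
            logging.info("heartbeat")
            time.sleep(60)

# EchoDaemon().main()  # dispatches on Configuration.action (start/stop/status)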
|
|
# coding: utf-8
"""
Main commands available for flatisfy.
"""
from __future__ import absolute_import, print_function, unicode_literals
import collections
import logging
import os
import flatisfy.filters
from flatisfy import database
from flatisfy import email
from flatisfy.models import flat as flat_model
from flatisfy.models import postal_code as postal_code_model
from flatisfy.models import public_transport as public_transport_model
from flatisfy import fetch
from flatisfy import tools
from flatisfy.filters import metadata
from flatisfy.web import app as web_app
import time
from ratelimit.exception import RateLimitException
LOGGER = logging.getLogger(__name__)
def filter_flats_list(config, constraint_name, flats_list, fetch_details=True, past_flats=None):
"""
Filter the given flats list according to the constraint criteria, in several passes.
:param config: A config dict.
:param constraint_name: The constraint name that the ``flats_list`` should
satisfy.
:param flats_list: The initial list of flat objects to filter.
:param fetch_details: Whether additional details should be fetched between
the two passes.
:param past_flats: The list of already fetched flats
:return: A dict mapping flat status and list of flat objects.
"""
# Add the flatisfy metadata entry and prepare the flat objects
flats_list = metadata.init(flats_list, constraint_name)
# Get the associated constraint from config
try:
constraint = config["constraints"][constraint_name]
except KeyError:
LOGGER.error(
"Missing constraint %s. Skipping filtering for these posts.",
constraint_name,
)
return {"new": [], "duplicate": [], "ignored": []}
first_pass_result = collections.defaultdict(list)
second_pass_result = collections.defaultdict(list)
third_pass_result = collections.defaultdict(list)
# Do a first pass with the available infos to try to remove as much
# unwanted postings as possible
if config["passes"] > 0:
first_pass_result = flatisfy.filters.first_pass(flats_list, constraint, config)
else:
first_pass_result["new"] = flats_list
# Load additional infos
if fetch_details:
past_ids = {x["id"]: x for x in past_flats} if past_flats else {}
for i, flat in enumerate(first_pass_result["new"]):
details = None
use_cache = past_ids.get(flat["id"])
if use_cache:
LOGGER.debug("Skipping details download for %s.", flat["id"])
details = use_cache
else:
if flat["id"].split("@")[1] in ["seloger", "leboncoin"]:
try:
details = fetch.fetch_details_rate_limited(config, flat["id"])
except RateLimitException:
time.sleep(60)
details = fetch.fetch_details_rate_limited(config, flat["id"])
else:
details = fetch.fetch_details(config, flat["id"])
first_pass_result["new"][i] = tools.merge_dicts(flat, details)
# Do a second pass to consolidate all the infos we found and make use of
# additional infos
if config["passes"] > 1:
second_pass_result = flatisfy.filters.second_pass(first_pass_result["new"], constraint, config)
else:
second_pass_result["new"] = first_pass_result["new"]
# Do a third pass to deduplicate better
if config["passes"] > 2:
third_pass_result = flatisfy.filters.third_pass(second_pass_result["new"], config)
else:
third_pass_result["new"] = second_pass_result["new"]
return {
"new": third_pass_result["new"],
"duplicate": (
first_pass_result["duplicate"] + second_pass_result["duplicate"] + third_pass_result["duplicate"]
),
"ignored": (first_pass_result["ignored"] + second_pass_result["ignored"] + third_pass_result["ignored"]),
}
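# Illustrative only (hypothetical flat ids): the dict returned above has the shape
# {
#     "new": [{"id": "123@seloger", ...}, ...],
#     "duplicate": [...],
#     "ignored": [...],
# }
# where each flat dict also carries the metadata entry added by metadata.init().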
def filter_fetched_flats(config, fetched_flats, fetch_details=True, past_flats={}):
"""
Filter the flats fetched for each constraint according to its criteria.
:param config: A config dict.
:param fetch_details: Whether additional details should be fetched between
the two passes.
:param fetched_flats: The initial dict mapping constraints to the list of
fetched flat objects to filter.
:return: A dict mapping constraints to a dict mapping flat status and list
of flat objects.
"""
for constraint_name, flats_list in fetched_flats.items():
fetched_flats[constraint_name] = filter_flats_list(
config,
constraint_name,
flats_list,
fetch_details,
past_flats.get(constraint_name, None),
)
return fetched_flats
def import_and_filter(config, load_from_db=False, new_only=False):
"""
Fetch the available flats list. Then, filter it according to criteria.
Finally, store it in the database.
:param config: A config dict.
:param load_from_db: Whether to load flats from database or fetch them
using Woob.
:return: ``None``.
"""
# Fetch and filter flats list
past_flats = fetch.load_flats_from_db(config)
if load_from_db:
fetched_flats = past_flats
else:
fetched_flats = fetch.fetch_flats(config)
# Do not fetch additional details if we loaded data from the db.
flats_by_status = filter_fetched_flats(
config,
fetched_flats=fetched_flats,
fetch_details=(not load_from_db),
past_flats=past_flats if new_only else {},
)
# Create database connection
get_session = database.init_db(config["database"], config["search_index"])
new_flats = []
result = []
LOGGER.info("Merging fetched flats in database...")
# Flatten the flats_by_status dict
flatten_flats_by_status = collections.defaultdict(list)
for flats in flats_by_status.values():
for status, flats_list in flats.items():
flatten_flats_by_status[status].extend(flats_list)
with get_session() as session:
# Set is_expired to true for all existing flats.
# This will be set back to false if we find them during importing.
for flat in session.query(flat_model.Flat).all():
flat.is_expired = True
for status, flats_list in flatten_flats_by_status.items():
# Build SQLAlchemy Flat model objects for every available flat
flats_objects = {flat_dict["id"]: flat_model.Flat.from_dict(flat_dict) for flat_dict in flats_list}
if flats_objects:
# If there are some flats, try to merge them with the ones in
# db
existing_flats_queries = session.query(flat_model.Flat).filter(
flat_model.Flat.id.in_(flats_objects.keys())
)
for each in existing_flats_queries.all():
# For each flat to merge, take care not to overwrite the
# status if the user defined it
flat_object = flats_objects[each.id]
if each.status in flat_model.AUTOMATED_STATUSES:
flat_object.status = getattr(flat_model.FlatStatus, status)
else:
flat_object.status = each.status
# Every flat we fetched isn't expired
flat_object.is_expired = False
# For each flat already in the db, merge it (UPDATE)
# instead of adding it
session.merge(flats_objects.pop(each.id))
# For any other flat, it is not already in the database, so we can
# just set the status field without worrying
for flat in flats_objects.values():
flat.status = getattr(flat_model.FlatStatus, status)
if flat.status == flat_model.FlatStatus.new:
new_flats.append(flat)
result.append(flat.id)
session.add_all(flats_objects.values())
if config["send_email"]:
email.send_notification(config, new_flats)
LOGGER.info(f"Found {len(result)} new flats.")
# Touch a file to indicate last update timestamp
ts_file = os.path.join(config["data_directory"], "timestamp")
with open(ts_file, "w"):
os.utime(ts_file, None)
LOGGER.info("Done!")
return result
def purge_db(config):
"""
Purge the database.
:param config: A config dict.
:return: ``None``
"""
get_session = database.init_db(config["database"], config["search_index"])
with get_session() as session:
# Delete every flat in the db
LOGGER.info("Purge all flats from the database.")
for flat in session.query(flat_model.Flat).all():
# Use (slower) deletion by object, to ensure whoosh index is
# updated
session.delete(flat)
LOGGER.info("Purge all postal codes from the database.")
session.query(postal_code_model.PostalCode).delete()
LOGGER.info("Purge all public transportations from the database.")
session.query(public_transport_model.PublicTransport).delete()
def serve(config):
"""
Serve the web app.
:param config: A config dict.
:return: ``None``, long-running process.
"""
app = web_app.get_app(config)
server = config.get("webserver", None)
if not server:
# Default webserver is quiet, as Bottle is used with Canister for
# standard logging
server = web_app.QuietWSGIRefServer
print("Launching web viewer running on http://%s:%s" % (config["host"], config["port"]))
app.run(host=config["host"], port=config["port"], server=server)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Writes events to disk in a logdir."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import threading
import time
import multiprocessing
import six
from .proto import event_pb2
from .record_writer import RecordWriter, directory_check
class EventsWriter(object):
'''Writes `Event` protocol buffers to an event file.'''
def __init__(self, file_prefix, filename_suffix=''):
'''
Events files have a name of the form
'/some/file/path/events.out.tfevents.[timestamp].[hostname]'
'''
self._file_name = file_prefix + ".out.tfevents." + str(time.time())[:10] + "." +\
socket.gethostname() + filename_suffix
self._num_outstanding_events = 0
self._py_recordio_writer = RecordWriter(self._file_name)
# Initialize an event instance.
self._event = event_pb2.Event()
self._event.wall_time = time.time()
self._event.file_version = 'brain.Event:2'
self._lock = threading.Lock()
self.write_event(self._event)
def write_event(self, event):
'''Append "event" to the file.'''
# Check if event is of type event_pb2.Event proto.
if not isinstance(event, event_pb2.Event):
raise TypeError("Expected an event_pb2.Event proto, "
" but got %s" % type(event))
return self._write_serialized_event(event.SerializeToString())
def _write_serialized_event(self, event_str):
with self._lock:
self._num_outstanding_events += 1
self._py_recordio_writer.write(event_str)
def flush(self):
'''Flushes the event file to disk.'''
with self._lock:
self._num_outstanding_events = 0
self._py_recordio_writer.flush()
return True
def close(self):
'''Call self.flush().'''
return_value = self.flush()
with self._lock:
self._py_recordio_writer.close()
return return_value
class EventFileWriter(object):
"""Writes `Event` protocol buffers to an event file.
The `EventFileWriter` class creates an event file in the specified directory,
and asynchronously writes Event protocol buffers to the file. The Event file
is encoded using the tfrecord format, which is similar to RecordIO.
"""
def __init__(self, logdir, max_queue_size=10, flush_secs=120, filename_suffix=''):
"""Creates a `EventFileWriter` and an event file to write to.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers, which are written to
disk via the add_event method.
The other arguments to the constructor control the asynchronous writes to
the event file:
Args:
logdir: A string. Directory where event file will be written.
max_queue_size: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
"""
self._logdir = logdir
directory_check(self._logdir)
self._event_queue = multiprocessing.Queue(max_queue_size)
self._ev_writer = EventsWriter(os.path.join(
self._logdir, "events"), filename_suffix)
self._flush_secs = flush_secs
self._closed = False
self._worker = _EventLoggerThread(self._event_queue, self._ev_writer,
flush_secs)
self._worker.start()
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self._logdir
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file and a new write/flush worker
is created. Does nothing if the EventFileWriter was not closed.
"""
if self._closed:
self._closed = False
self._worker = _EventLoggerThread(
self._event_queue, self._ev_writer, self._flush_secs
)
self._worker.start()
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
if not self._closed:
self._event_queue.put(event)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
if not self._closed:
self._ev_writer.flush()
def close(self):
"""Performs a final flush of the event file to disk, stops the
write/flush worker and closes the file. Call this method when you do not
need the summary writer anymore.
"""
if not self._closed:
self.flush()
self._worker.stop()
self._ev_writer.close()
self._event_queue.close()
self._event_queue = None # this is critical
self._worker = None # this is critical too
self._closed = True
class _EventLoggerThread(threading.Thread):
"""Thread that logs events."""
def __init__(self, queue, record_writer, flush_secs):
"""Creates an _EventLoggerThread.
Args:
queue: A Queue from which to dequeue data.
record_writer: A data writer used to log brain events for
the visualizer.
flush_secs: How often, in seconds, to flush the
pending file to disk.
"""
threading.Thread.__init__(self)
self.daemon = True
self._queue = queue
self._record_writer = record_writer
self._flush_secs = flush_secs
# The first data will be flushed immediately.
self._next_flush_time = 0
self._has_pending_data = False
self._shutdown_signal = object()
def stop(self):
self._queue.put(self._shutdown_signal)
self.join()
def run(self):
# Wait on the queue until data appears, or until the next time to
# flush the writer, whichever comes first. If we get data, write it.
# If not, an Empty exception is raised and we proceed to flush the
# writer.
while True:
now = time.time()
queue_wait_duration = self._next_flush_time - now
data = None
try:
if queue_wait_duration > 0:
data = self._queue.get(True, queue_wait_duration)
else:
data = self._queue.get(False)
# Identity is lost across the multiprocessing queue (objects are
# pickled), so the plain-object sentinel is detected by type.
if type(data) == type(self._shutdown_signal):
return
self._record_writer.write_event(data)
self._has_pending_data = True
except six.moves.queue.Empty:
pass
now = time.time()
if now > self._next_flush_time:
if self._has_pending_data:
# Small optimization - if there are no pending data,
# there's no need to flush, since each flush can be
# expensive (e.g. uploading a new file to a server).
self._record_writer.flush()
self._has_pending_data = False
# Do it again in flush_secs.
self._next_flush_time = now + self._flush_secs
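# Usage sketch (illustrative, not part of this module): log a single event.
def _example_write_event(logdir="/tmp/example_logs"):
    """Hypothetical helper showing the EventFileWriter life cycle."""
    writer = EventFileWriter(logdir)
    event = event_pb2.Event(wall_time=time.time(), step=1)
    writer.add_event(event)
    writer.flush()
    writer.close()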
|
|
"""The Minecraft Server integration."""
from __future__ import annotations
import asyncio
from datetime import datetime, timedelta
import logging
from typing import Any
from mcstatus.server import MinecraftServer as MCStatus
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import helpers
from .const import DOMAIN, MANUFACTURER, SCAN_INTERVAL, SIGNAL_NAME_PREFIX
PLATFORMS = ["binary_sensor", "sensor"]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up the Minecraft Server component."""
return True
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry) -> bool:
"""Set up Minecraft Server from a config entry."""
domain_data = hass.data.setdefault(DOMAIN, {})
# Create and store server instance.
unique_id = config_entry.unique_id
_LOGGER.debug(
"Creating server instance for '%s' (%s)",
config_entry.data[CONF_NAME],
config_entry.data[CONF_HOST],
)
server = MinecraftServer(hass, unique_id, config_entry.data)
domain_data[unique_id] = server
await server.async_update()
server.start_periodic_update()
# Set up platforms.
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
return True
async def async_unload_entry(
hass: HomeAssistantType, config_entry: ConfigEntry
) -> bool:
"""Unload Minecraft Server config entry."""
unique_id = config_entry.unique_id
server = hass.data[DOMAIN][unique_id]
# Unload platforms.
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, platform)
for platform in PLATFORMS
]
)
# Clean up.
server.stop_periodic_update()
hass.data[DOMAIN].pop(unique_id)
return True
class MinecraftServer:
"""Representation of a Minecraft server."""
# Private constants
_MAX_RETRIES_STATUS = 3
def __init__(
self, hass: HomeAssistantType, unique_id: str, config_data: ConfigType
) -> None:
"""Initialize server instance."""
self._hass = hass
# Server data
self.unique_id = unique_id
self.name = config_data[CONF_NAME]
self.host = config_data[CONF_HOST]
self.port = config_data[CONF_PORT]
self.online = False
self._last_status_request_failed = False
self.srv_record_checked = False
# 3rd party library instance
self._mc_status = MCStatus(self.host, self.port)
# Data provided by 3rd party library
self.version = None
self.protocol_version = None
self.latency_time = None
self.players_online = None
self.players_max = None
self.players_list = None
# Dispatcher signal name
self.signal_name = f"{SIGNAL_NAME_PREFIX}_{self.unique_id}"
# Callback for stopping periodic update.
self._stop_periodic_update = None
def start_periodic_update(self) -> None:
"""Start periodic execution of update method."""
self._stop_periodic_update = async_track_time_interval(
self._hass, self.async_update, timedelta(seconds=SCAN_INTERVAL)
)
def stop_periodic_update(self) -> None:
"""Stop periodic execution of update method."""
self._stop_periodic_update()
async def async_check_connection(self) -> None:
"""Check server connection using a 'status' request and store connection status."""
# Check if host is a valid SRV record, if not already done.
if not self.srv_record_checked:
self.srv_record_checked = True
srv_record = await helpers.async_check_srv_record(self._hass, self.host)
if srv_record is not None:
_LOGGER.debug(
"'%s' is a valid Minecraft SRV record ('%s:%s')",
self.host,
srv_record[CONF_HOST],
srv_record[CONF_PORT],
)
# Overwrite host, port and 3rd party library instance
# with data extracted out of SRV record.
self.host = srv_record[CONF_HOST]
self.port = srv_record[CONF_PORT]
self._mc_status = MCStatus(self.host, self.port)
# Ping the server with a status request.
try:
await self._hass.async_add_executor_job(
self._mc_status.status, self._MAX_RETRIES_STATUS
)
self.online = True
except OSError as error:
_LOGGER.debug(
"Error occurred while trying to check the connection to '%s:%s' - OSError: %s",
self.host,
self.port,
error,
)
self.online = False
async def async_update(self, now: datetime = None) -> None:
"""Get server data from 3rd party library and update properties."""
# Check connection status.
server_online_old = self.online
await self.async_check_connection()
server_online = self.online
# Inform user once about connection state changes if necessary.
if server_online_old and not server_online:
_LOGGER.warning("Connection to '%s:%s' lost", self.host, self.port)
elif not server_online_old and server_online:
_LOGGER.info("Connection to '%s:%s' (re-)established", self.host, self.port)
# Update the server properties if server is online.
if server_online:
await self._async_status_request()
# Notify sensors about new data.
async_dispatcher_send(self._hass, self.signal_name)
async def _async_status_request(self) -> None:
"""Request server status and update properties."""
try:
status_response = await self._hass.async_add_executor_job(
self._mc_status.status, self._MAX_RETRIES_STATUS
)
# Got answer to request, update properties.
self.version = status_response.version.name
self.protocol_version = status_response.version.protocol
self.players_online = status_response.players.online
self.players_max = status_response.players.max
self.latency_time = status_response.latency
self.players_list = []
if status_response.players.sample is not None:
for player in status_response.players.sample:
self.players_list.append(player.name)
self.players_list.sort()
# Inform user once about successful update if necessary.
if self._last_status_request_failed:
_LOGGER.info(
"Updating the properties of '%s:%s' succeeded again",
self.host,
self.port,
)
self._last_status_request_failed = False
except OSError as error:
# No answer to request, set all properties to unknown.
self.version = None
self.protocol_version = None
self.players_online = None
self.players_max = None
self.latency_time = None
self.players_list = None
# Inform user once about failed update if necessary.
if not self._last_status_request_failed:
_LOGGER.warning(
"Updating the properties of '%s:%s' failed - OSError: %s",
self.host,
self.port,
error,
)
self._last_status_request_failed = True
class MinecraftServerEntity(Entity):
"""Representation of a Minecraft Server base entity."""
def __init__(
self, server: MinecraftServer, type_name: str, icon: str, device_class: str
) -> None:
"""Initialize base entity."""
self._server = server
self._name = f"{server.name} {type_name}"
self._icon = icon
self._unique_id = f"{self._server.unique_id}-{type_name}"
self._device_info = {
"identifiers": {(DOMAIN, self._server.unique_id)},
"name": self._server.name,
"manufacturer": MANUFACTURER,
"model": f"Minecraft Server ({self._server.version})",
"sw_version": self._server.protocol_version,
}
self._device_class = device_class
self._extra_state_attributes = None
self._disconnect_dispatcher = None
@property
def name(self) -> str:
"""Return name."""
return self._name
@property
def unique_id(self) -> str:
"""Return unique ID."""
return self._unique_id
@property
def device_info(self) -> dict[str, Any]:
"""Return device information."""
return self._device_info
@property
def device_class(self) -> str:
"""Return device class."""
return self._device_class
@property
def icon(self) -> str:
"""Return icon."""
return self._icon
@property
def should_poll(self) -> bool:
"""Disable polling."""
return False
async def async_update(self) -> None:
"""Fetch data from the server."""
raise NotImplementedError()
async def async_added_to_hass(self) -> None:
"""Connect dispatcher to signal from server."""
self._disconnect_dispatcher = async_dispatcher_connect(
self.hass, self._server.signal_name, self._update_callback
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect dispatcher before removal."""
self._disconnect_dispatcher()
@callback
def _update_callback(self) -> None:
"""Triggers update of properties after receiving signal from server."""
self.async_schedule_update_ha_state(force_refresh=True)
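# Data-flow note (summarizing the code above): start_periodic_update() schedules
# MinecraftServer.async_update() every SCAN_INTERVAL seconds; async_update()
# refreshes the server properties and fires the dispatcher signal
# self.signal_name; each MinecraftServerEntity subscribes to that signal in
# async_added_to_hass() and refreshes its state via _update_callback().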
|
|
#!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import os.path
import base64
import httplib
import sys
import hashlib
import terracoin_hash
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
#hash1 = hashlib.sha256()
#hash1.update(blk_hdr)
#hash1_o = hash1.digest()
#hash2 = hashlib.sha256()
#hash2.update(hash1_o)
#hash2_o = hash2.digest()
#return hash2_o
pow_hash = terracoin_hash.getPoWHash(blk_hdr)
return pow_hash
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
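# For reference: offset 68 is the timestamp because the 80-byte block header is
#   version(4) | prev_block_hash(32) | merkle_root(32) | time(4) | bits(4) | nonce(4)
# so struct.unpack("<I", blk_hdr[68:68+4]) reads the little-endian nTime field.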
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str)
lastDate = blkDate
if outF:
outF.close()
if setFileTime:
os.utime(outFname, (int(time.time()), highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
outFname = self.settings['output_file']
else:
outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + outFname)
self.outF = open(outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic: " + inMagic.encode('hex'))
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
hash_str = calc_hash_str(blk_hdr)
if hash_str not in self.blkmap:
print("Skipping unknown block " + hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = 'cee2caff'
if 'genesis' not in settings:
settings['genesis'] = '00000bafbc94add76cb75e2ec92894837288a481e5c005f6563d91623bf8bc2c'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000L * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
settings['max_out_sz'] = long(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
|
|
import pfacets.np as np
import collections
import copy
import errno
import fnmatch
import glob
import imp
import itertools
import os
import os.path as osp
import re
import types
###################################
########### UTILITY FUNCTIONS
###################################
def autonew_dict():
def inner():
return collections.defaultdict(inner)
return collections.defaultdict(inner)
class data:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def generate_filtered(gen, filt, num):
"""Generate a filtered list of items from a generator.
`gen.next` is called repeatedly and each item is passed to `filt`. Items for
which `filt` returns true are included in the return list. Runs until `num`
items are in the list.
Parameters
----------
gen : generator
Generates candidates for inclusion in return list.
filt : callable
Should take a single argument and return a value castable to Boolean.
num : int
Number of items to return.
Returns
-------
list
`num` items created by `gen` and validated by `filt`
"""
res = []
while len(res) < num:
x = gen.next()
filt(x) and res.append(x)
return res
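# Example (illustrative, Python 2 generators as used here):
#   generate_filtered(itertools.count(), lambda x: x % 2 == 0, 3) -> [0, 2, 4]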
def get_recursive(d, default, *keys):
"""Attempt to retrieve a value from a nested dictionary.
Parameters
----------
d : dict
A dictionary, possibly containing other dictionaries.
default
The value to return if `keys` does not correspond to a dictionary path.
*keys : hashable obj
The keys will be looked up in sequence, starting from `d`.
Returns
-------
val or `default`
If the series of keys does not correspond to a dictionary path, return `default`.
"""
try:
for k in keys:
d = d[k]
return d
except:
return default
def glob_recursive(path, pat):
"""Return all matches of a pattern recursively located within a directory.
Parameters
----------
path : str
path to the target directory
pat : str
a file matching pattern
Returns
-------
list
list of full paths to the matches
"""
paths = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, pat):
paths.append(os.path.join(root, filename))
return paths
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
def map_object_to_dict(obj, mapping):
d = {}
od = obj if type(obj) is dict else obj.__dict__
for attrname,keys in mapping.items():
# if getattr(obj, attrname, None) != None:
if od.get(attrname, None) != None:
curr = d
for k in keys[0:-1]:
curr = curr.setdefault(k, {})
curr[keys[-1]] = od[attrname]
# reduce(lambda d,k: d[k], keys[0:-1], d)[keys[-1]] = getattr(obj, attrname)
return d
def merge(*dicts, **kwargs):
"""Merge an arbitrary number of dictionaries.
Values in dictionaries occurring later in the argument list have priority.
Parameters
----------
mergelists : bool (optional)
If True, then lists will be concatenated rather than overwritten
*dicts : tuple(obj)
Arbitrary number of configuration dictionaries.
"""
kwargs.setdefault('mergelists', False)
def mergeInner(config1, config2):
config1 = copy.deepcopy(config1)
for k,v in config2.items():
if isinstance(v, dict) and config1.has_key(k) and isinstance(config1.get(k), dict):
config1[k] = mergeInner(config1[k], v)
elif (kwargs['mergelists'] and isinstance(v, list) and isinstance(config1.get(k), list)):
config1[k] += v
else:
config1[k] = config2[k]
return config1
return reduce(mergeInner, dicts, {})
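# Example of merge() semantics (illustrative inputs):
#   merge({'a': {'x': 1}, 'l': [1]}, {'a': {'y': 2}, 'l': [2]}, mergelists=True)
#   -> {'a': {'x': 1, 'y': 2}, 'l': [1, 2]}
# Without mergelists=True, the later list simply replaces the earlier one.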
def mixin(instance, klass):
"""Make a classes methods available to a single object.
"Mixes in" a class' methods to a specific object. Like inheritance, but
applied to one object rather than a whole class. Works only for new-style
classes.
Parameters
----------
instance : object
Any object.
klass : class
Any class.
Returns
-------
None
"""
for name in klass.__dict__:
if name.startswith('__') and name.endswith('__'):
continue
elif not type(klass.__dict__[name])==types.FunctionType:
continue
else:
instance.__dict__[name]=klass.__dict__[name].__get__(instance)
def numeric_path_sort(paths, basename=False):
if basename:
key = lambda p: int(re.search(r'\d+', osp.basename(p)).group(0))
else:
key = lambda p: int(re.search(r'\d+', p).group(0))
return sorted(paths, key=key)
def clear_path(path):
if os.path.exists(path):
os.remove(path)
def mkdir_p(path):
try:
if not os.path.exists(path):
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def set_attributes_from_dicts(obj, *dicts, **kwargs):
"""Merge provided dictionaries into attributes on object.
All provided `dicts` are recursively merged into a single dict. Then each
key-value pair is set as an attribute on `obj`.
Parameters
----------
obj : object
Any object.
dicts : dict
Dictionaries containing settings for target object.
Returns
-------
dict
The single dictionary resulting from recursive merge of `dicts`.
"""
if kwargs.get('conservative', False):
keys = dicts[0].keys()
dicts = [ dict([(k, d[k]) for k in keys if k in d]) for d in dicts ]
settings = merge(*dicts)
for k,v in settings.items():
setattr(obj, k, v)
return settings
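# Example (illustrative): set_attributes_from_dicts(obj, {'a': 1}, {'a': 2, 'b': 3})
# sets obj.a == 2 and obj.b == 3 and returns {'a': 2, 'b': 3}.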
def super_init_with_defaults(klass, obj, kwargs):
super(klass, obj).__init__( **merge(klass.defaults, kwargs) )
def symlink_f(src, dest):
"""Create a symlink, overwriting a preexisting one.
Parameters
----------
src : str
path to target file
dest : str
path to symlink
"""
try:
os.remove(dest)
except:
pass
os.symlink(src, dest)
###################################
########### CONFIG
###################################
def merge_config(args, argmap, filepath, defaults):
config_module = load_local_module(path=filepath)
local_config = config_module.config if config_module is not None else {}
cli_config = map_object_to_dict(args, argmap)
return merge(defaults, local_config, cli_config)
def load_local_module(path=None, default_name = None,
default_paths=[os.path.expanduser('~'), '.']):
path = os.path.normpath(path or os.path.expanduser("~/{0}".format(default_name)))
local_config = None
if os.path.exists(path):
local_config = imp.load_source('local_config', path)
return local_config
def get_num_intervals(start, fin, mod):
"""Determine the number of cycle repetitions that occur in between two indices"""
inter = fin - start
q,r = divmod(inter, mod)
if q == r:
return q
else:
offset = (mod - (start % mod)) % mod
return len(xrange(offset, inter, mod))
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class VariableScopeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
@test_util.run_in_graph_and_eager_modes()
def testResource(self):
vs = variable_scope._get_default_variable_store()
v1 = vs.get_variable("v", [1], use_resource=True)
self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable))
@test_util.run_in_graph_and_eager_modes()
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
if context.in_graph_mode():
# When reuse is False, we fail when variables are already there.
vs.get_variable("w", [1], reuse=False) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("v", [1], reuse=False) # That fails.
# When reuse is True, we fail when variables are new.
vs.get_variable("v", [1], reuse=True) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("u", [1], reuse=True) # That fails.
@test_util.run_in_graph_and_eager_modes()
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set([v.name for v in vs._vars.values()]))
@test_util.run_in_graph_and_eager_modes()
def testVarScopeInitializer(self):
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("tower0") as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
with variable_scope.variable_scope(tower, initializer=init):
w = variable_scope.get_variable("w", [])
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes()
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with variable_scope.variable_scope("tower1") as tower:
with variable_scope.variable_scope("foo", constraint=constraint):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.constraint, constraint)
with variable_scope.variable_scope(tower, constraint=constraint):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.constraint, constraint)
@test_util.run_in_graph_and_eager_modes()
def testVarScopeDType(self):
with variable_scope.variable_scope("tower2") as tower:
with variable_scope.variable_scope("foo", dtype=dtypes.float16):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, dtypes.float16)
with variable_scope.variable_scope(tower, dtype=dtypes.float16):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, dtypes.float16)
@test_util.run_in_graph_and_eager_modes()
def testInitFromNonTensorValue(self):
v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32)
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 4)
w = variable_scope.get_variable(
"w4", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
if context.in_graph_mode():
with self.assertRaises(TypeError):
variable_scope.get_variable("x4", initializer={})
else:
with self.assertRaises(errors.InvalidArgumentError):
variable_scope.get_variable("x4", initializer={})
@test_util.run_in_graph_and_eager_modes()
def testInitFromNonInitializer(self):
# Test various dtypes with zeros initializer as following:
types = [
dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
dtypes.int64, dtypes.bool
]
# Use different variable_name to distinguish various dtypes
for (i, dtype) in enumerate(types):
x = variable_scope.get_variable(
name="xx%d" % i, shape=(3, 4), dtype=dtype)
y = variable_scope.get_variable(
name="yy%d" % i,
shape=(3, 4),
dtype=dtype,
initializer=init_ops.zeros_initializer(dtype=dtype))
self.evaluate(variables_lib.global_variables_initializer())
self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
# TODO(alive): support variable partitioning/caching in eager mode.
def testVarScopeCachingDevice(self):
with self.test_session():
caching_device = "/job:moo"
with variable_scope.variable_scope("tower"):
with variable_scope.variable_scope(
"caching", caching_device=caching_device):
v = variable_scope.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
with variable_scope.variable_scope("child"):
v2 = variable_scope.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
with variable_scope.variable_scope("not_cached", caching_device=""):
v2_not_cached = variable_scope.get_variable("v", [])
self.assertFalse(v2_not_cached.value().device.startswith(
caching_device))
with variable_scope.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
v2_identity_device = variable_scope.get_variable("v", [])
self.assertFalse(v2_identity_device.value().device.startswith(
caching_device))
with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
v_live = variable_scope.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
v_tower = variable_scope.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
@test_util.run_in_graph_and_eager_modes()
def testVarScopeRegularizer(self):
init = init_ops.constant_initializer(0.3)
def regularizer1(v):
return math_ops.reduce_mean(v) + 0.1
def regularizer2(v):
return math_ops.reduce_mean(v) + 0.2
with variable_scope.variable_scope(
"tower3", regularizer=regularizer1) as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
with variable_scope.variable_scope(tower, initializer=init) as vs:
u = variable_scope.get_variable("u", [])
vs.set_regularizer(regularizer2)
w = variable_scope.get_variable("w", [])
# The next 3 variables are not regularized, to test disabling regularization.
x = variable_scope.get_variable(
"x", [], regularizer=variable_scope.no_regularizer)
with variable_scope.variable_scope(
"baz", regularizer=variable_scope.no_regularizer):
y = variable_scope.get_variable("y", [])
vs.set_regularizer(variable_scope.no_regularizer)
z = variable_scope.get_variable("z", [])
# Check results.
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses))
self.evaluate(variables_lib.variables_initializer([u, w, x, y, z]))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
self.assertAllClose(self.evaluate(losses[1]), 0.4)
self.assertAllClose(self.evaluate(losses[2]), 0.5)
with variable_scope.variable_scope("foo", reuse=True):
v = variable_scope.get_variable("v",
[])  # "v" is already there, reused
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
@test_util.run_in_graph_and_eager_modes()
def testInitializeFromValue(self):
init = constant_op.constant(0.1)
w = variable_scope.get_variable("v", initializer=init)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.1)
with self.assertRaisesRegexp(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
variable_scope.get_variable("u", [1], initializer=init)
with variable_scope.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = variable_scope.get_variable("v")
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = constant_op.constant(1, dtype=dtypes.int32)
t = variable_scope.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, dtypes.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegexp(ValueError, "don't match"):
variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
def testControlDeps(self):
with self.test_session() as sess:
v0 = variable_scope.get_variable(
"v0", [1], initializer=init_ops.constant_initializer(0))
with ops.control_dependencies([v0.value()]):
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
add = v1 + v0
# v0 should be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
# We should be able to initialize and run v1 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual(1, sess.run(v1))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
def testControlFlow(self):
with self.test_session() as sess:
v0 = variable_scope.get_variable(
"v0", [], initializer=init_ops.constant_initializer(0))
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variable_scope.get_variable(
"v2", [1], initializer=init_ops.constant_initializer(2))
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual([1], sess.run(v1))
sess.run(v2.initializer)
self.assertEqual([2], sess.run(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
@test_util.run_in_graph_and_eager_modes()
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("bar"):
new_init1 = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init1, None)
# Check that we can set initializer like this.
variable_scope.get_variable_scope().set_initializer(init)
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
if context.in_graph_mode():
# Check that we can set reuse.
variable_scope.get_variable_scope().reuse_variables()
with self.assertRaises(ValueError): # Fail, w does not exist yet.
variable_scope.get_variable("w", [1])
# Check that the set initializer goes away.
new_init = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init, None)
@test_util.run_in_graph_and_eager_modes()
def testVarScope(self):
with variable_scope.variable_scope("tower4") as tower:
self.assertEqual(tower.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower4/scope/")
with variable_scope.variable_scope("tower5"):
with variable_scope.variable_scope("bar") as bar:
self.assertEqual(bar.name, "tower5/bar")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower5/bar/scope/")
with variable_scope.variable_scope("tower6"):
with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
self.assertEqual(tower_shared.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower6/tower4/scope/")
@test_util.run_in_graph_and_eager_modes()
def testVarScopeNameScope(self):
with ops.name_scope("testVarScopeNameScope1"):
with variable_scope.variable_scope("tower") as tower:
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower/scope2/")
if context.in_graph_mode():
with variable_scope.variable_scope(
tower): # Re-entering acts like another "tower".
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_1/scope2/")
with variable_scope.variable_scope(
"tower"): # Re-entering by string acts the same.
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_2/scope2/")
with ops.name_scope("testVarScopeNameScope2"):
with variable_scope.variable_scope("tower"):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower/scope2/")
if context.in_graph_mode():
with variable_scope.variable_scope(tower):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower_1/scope2/")
root_var_scope = variable_scope.get_variable_scope()
with ops.name_scope("testVarScopeNameScope3"):
with variable_scope.variable_scope(root_var_scope):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope3/scope2/")
def testVarScopeOriginalNameScope(self):
with self.test_session():
with ops.name_scope("scope1"):
with variable_scope.variable_scope("tower") as tower:
self.assertEqual(tower.original_name_scope, "scope1/tower/")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower) as tower1:
# Re-entering preserves original name scope.
self.assertEqual(tower1.original_name_scope, "scope1/tower/")
with ops.name_scope("foo") as sc2:
self.assertEqual(sc2, "scope2/tower/foo/")
# Test re-entering original name scope.
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower):
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar_1/")
def testVarScopeObjectReuse(self):
with self.test_session():
vs = None
with variable_scope.variable_scope("jump", reuse=True) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertTrue(jump_no_reuse.reuse) # Inherited, cannot be undone.
with variable_scope.variable_scope("jump", reuse=False) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
@test_util.run_in_graph_and_eager_modes()
def testVarScopeGetOrCreateReuse(self):
def test_value(value):
x = constant_op.constant(value)
with variable_scope.variable_scope("testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = state_ops.assign(variable_scope.get_variable("var", []), x)
with variable_scope.variable_scope("testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = variable_scope.get_variable("var", [])
self.assertEqual(value, self.evaluate(x))
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
def testVarOpScope(self):
with self.test_session():
with ops.name_scope("testVarOpScope1"):
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "tower/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower/testVarOpScope2/")
with variable_scope.variable_scope("tower", "default", []):
with self.assertRaises(ValueError):
variable_scope.get_variable("w", [])
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower_1/testVarOpScope2/")
with ops.name_scope("testVarOpScope2"):
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default/testVarOpScope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default_1/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default_1/testVarOpScope2/")
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.test_session():
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_2/layer/w:0")
def testVarOpScopeUniqueNamesWithJump(self):
with self.test_session():
with variable_scope.variable_scope("default") as default:
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer/w:0")
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_1/w:0")
with variable_scope.variable_scope(default):
pass
# No matter the jump in the middle, unique numbering continues.
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_2/w:0")
def testVarOpScopeReuse(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True) as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarScopeGetVar(self):
with self.test_session():
with variable_scope.variable_scope("root"):
with variable_scope.variable_scope("towerA") as tower_a:
va = variable_scope.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("towerB"):
vb = variable_scope.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with variable_scope.variable_scope("towerA"):
va2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("towerA", reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("foo"):
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va3 = variable_scope.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
with variable_scope.variable_scope(tower_a, reuse=True):
with variable_scope.variable_scope("baz"):
variable_scope.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [1], dtype=dtypes.int32)
self.assertEqual("dtype" in str(exc.exception), True)
def testVarScopeOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarScopeNestedOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer_1/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default_1/scope2/")
def testVarOpScopeReuseParam(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer) as outer:
with variable_scope.variable_scope("tower", "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarOpScopeReuseError(self):
with self.test_session():
with self.assertRaises(ValueError):
with variable_scope.variable_scope(None, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
def testVarOpScopeOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarOpScopeNestedOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes()
def testGetLocalVar(self):
# Check that local variable respects naming.
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
local_var = variable_scope.get_local_variable(
"w", [], collections=["foo"])
self.assertEqual(local_var.name, "outer/w:0")
# Since variable is local, it should be in the local variable collection
# but not the trainable collection.
self.assertIn(local_var,
ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
self.assertIn(local_var, ops.get_collection("foo"))
self.assertNotIn(local_var,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Check that local variable respects `reuse`.
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_local_variable("w", []).name, "outer/w:0")
def testGetVarWithDevice(self):
g = ops.Graph()
varname_type = []
def device_func(op):
if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
varname_type.append((op.name, op.get_attr("dtype")))
return "/device:GPU:0"
with g.as_default():
with ops.device(device_func):
_ = variable_scope.get_variable("x", (100, 200))
_ = variable_scope.get_variable(
"y", dtype=dtypes.int64, initializer=numpy.arange(73))
self.assertEqual(varname_type[0], ("x", dtypes.float32))
self.assertEqual(varname_type[1], ("y", dtypes.int64))
@test_util.run_in_graph_and_eager_modes()
def testGetCollection(self):
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable("testGetCollection_b", [], trainable=False)
with variable_scope.variable_scope("testGetCollection_foo_") as scope1:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable("testGetCollection_b", [],
trainable=False)
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo_/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0"
])
with variable_scope.variable_scope("testGetCollection_foo") as scope2:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable("testGetCollection_b", [],
trainable=False)
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
scope = variable_scope.get_variable_scope()
self.assertEqual([
v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_a:0", "testGetCollection_b:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0",
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
self.assertEqual([
v.name
for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], [
"testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_a:0"
])
@test_util.run_in_graph_and_eager_modes()
def testGetTrainableVariables(self):
_ = variable_scope.get_variable("testGetTrainableVariables_a", [])
with variable_scope.variable_scope(
"testGetTrainableVariables_foo") as scope:
_ = variable_scope.get_variable("testGetTrainableVariables_b", [])
_ = variable_scope.get_variable("testGetTrainableVariables_c", [],
trainable=False)
self.assertEqual([v.name
for v in scope.trainable_variables()],
["testGetTrainableVariables_foo/"
"testGetTrainableVariables_b:0"])
@test_util.run_in_graph_and_eager_modes()
def testGetGlobalVariables(self):
_ = variable_scope.get_variable("testGetGlobalVariables_a", [])
with variable_scope.variable_scope("testGetGlobalVariables_foo") as scope:
_ = variable_scope.get_variable("testGetGlobalVariables_b", [])
self.assertEqual([v.name
for v in scope.global_variables()],
["testGetGlobalVariables_foo/"
"testGetGlobalVariables_b:0"])
@test_util.run_in_graph_and_eager_modes()
def testGetLocalVariables(self):
_ = variable_scope.get_variable(
"a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
with variable_scope.variable_scope("foo") as scope:
_ = variable_scope.get_variable(
"b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
_ = variable_scope.get_variable(
"c", [])
self.assertEqual([v.name
for v in scope.local_variables()], ["foo/b:0"])
def testGetVariableWithRefDtype(self):
v = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32)
# Ensure it is possible to do get_variable with a _ref dtype passed in.
_ = variable_scope.get_variable("w", shape=[5, 6], dtype=v.dtype)
def axis0_into1_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
return part
def axis0_into2_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 2
return part
def axis0_into3_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 3
return part
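# Hedged worked example (not part of the original tests): a partitioner receives the
# full variable shape and returns the number of partitions per axis, e.g. for a
# variable of shape (6, 4):
#   axis0_into1_partitioner(shape=(6, 4)) -> [1, 1]  (not partitioned)
#   axis0_into2_partitioner(shape=(6, 4)) -> [2, 1]  (axis 0 split into 2 pieces)
#   axis0_into3_partitioner(shape=(6, 4)) -> [3, 1]  (axis 0 split into 3 pieces)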
class VariableScopeWithPartitioningTest(test.TestCase):
def testInitFromNonInitializer(self):
with self.test_session() as sess:
# Test various dtypes with the zeros initializer as follows:
types = [
dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
dtypes.int64, dtypes.bool
]
# Use different variable_name to distinguish various dtypes
for (i, dtype) in enumerate(types):
x = variable_scope.get_variable(
name="x%d" % i,
shape=(3, 4),
dtype=dtype,
partitioner=axis0_into2_partitioner)
y = variable_scope.get_variable(
name="y%d" % i,
shape=(6, 4),
dtype=dtype,
partitioner=axis0_into2_partitioner,
initializer=init_ops.zeros_initializer(dtype=dtype))
variables_lib.global_variables_initializer().run()
# After partitioning, x and y behave as lists of the partition variables.
val_x = sess.run(list(x))
val_y = sess.run(list(y))
self.assertAllEqual(val_x, val_y)
def testResultNameMatchesRequested(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])
def testBreaksIfPartitioningChanges(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into3_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into1_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
def testReturnsExistingConcatenatedValueIfReuse(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
variable_scope.get_variable_scope().reuse_variables()
v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
def testAllowsReuseWithoutPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope("scope0", reuse=True):
v_reused = variable_scope.get_variable("name0")
self.assertEqual(v, v_reused)
def testPropagatePartitionerOnReopening(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
with variable_scope.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
def testScalarIgnoresPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=())
self.assertEqual(v.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0:0", [x.name for x in variables])
def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
with variable_scope.variable_scope("root", use_resource=use_resource):
v0 = variable_scope.get_variable(
"n0", shape=(2, 2, 2), partitioner=_part_axis_0)
v1 = variable_scope.get_variable(
"n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
n0_0 = list(v0)[0]
n0_1 = list(v0)[1]
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
n1_0 = list(v1)[0]
n1_1 = list(v1)[1]
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
def testPartitionConcatenatesAlongCorrectAxis(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)
def testPartitionConcatenatesAlongCorrectAxisResource(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
class VariableScopeWithCustomGetterTest(test.TestCase):
def testNonCallableGetterFails(self):
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
with variable_scope.variable_scope("scope0", custom_getter=3):
variable_scope.get_variable("name0")
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
variable_scope.get_variable("name0", custom_getter=3)
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("new_scope") as new_scope:
v3 = variable_scope.get_variable("v3", [1])
with variable_scope.variable_scope(
new_scope, reuse=True, custom_getter=custom_getter):
v4 = variable_scope.get_variable("v3", [1])
self.assertEqual(v, v2)
self.assertEqual(v3, v4)
self.assertEqual(3, called[0]) # skipped one in the first new_scope
def testCustomGetterWithReuse(self):
# Custom getter can choose to behave differently on reused variables.
def custom_getter(getter, *args, **kwargs):
var = getter(*args, **kwargs)
if kwargs["reuse"]:
# This can be used, e.g., for changing the caching device if needed.
return array_ops.identity(var, name="reused")
else:
return array_ops.identity(var, name="not_reused")
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "not_reused:0")
self.assertEqual(v2.name, "reused:0")
def testGetterThatCreatesTwoVariablesAndSumsThem(self):
def custom_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/0" % name, *args, **kwargs)
g_1 = getter("%s/1" % name, *args, **kwargs)
with ops.name_scope("custom_getter"):
return g_0 + g_1
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(2, len(true_vars))
self.assertEqual("scope/v/0:0", true_vars[0].name)
self.assertEqual("scope/v/1:0", true_vars[1].name)
self.assertEqual("custom_getter/add:0", v.name)
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = sess.run([true_vars, v])
self.assertAllClose(np_v, sum(np_vars))
def testNestedCustomGetters(self):
def sum_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/sum_0" % name, *args, **kwargs)
g_1 = getter("%s/sum_1" % name, *args, **kwargs)
with ops.name_scope("sum_getter"):
return g_0 + g_1
def prod_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/prod_0" % name, *args, **kwargs)
g_1 = getter("%s/prod_1" % name, *args, **kwargs)
with ops.name_scope("prod_getter"):
return g_0 * g_1
with variable_scope.variable_scope(
"prod_scope", custom_getter=prod_getter):
with variable_scope.variable_scope(
"sum_scope", custom_getter=sum_getter):
with variable_scope.variable_scope(
"inner_sum_scope", custom_getter=sum_getter):
# take sums of sums of products
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(8, len(true_vars))
template = (
"prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0")
self.assertEqual(template % (0, 0, 0), true_vars[0].name)
self.assertEqual(template % (0, 0, 1), true_vars[1].name)
self.assertEqual(template % (0, 1, 0), true_vars[2].name)
self.assertEqual(template % (0, 1, 1), true_vars[3].name)
self.assertEqual(template % (1, 0, 0), true_vars[4].name)
self.assertEqual(template % (1, 0, 1), true_vars[5].name)
self.assertEqual(template % (1, 1, 0), true_vars[6].name)
self.assertEqual(template % (1, 1, 1), true_vars[7].name)
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = sess.run([true_vars, v])
# v is a sum of sums of products (see the getter nesting above)
self.assertAllClose(
np_v,
(((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3]))
+ ((np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7]))))
class PartitionInfoTest(test.TestCase):
def testConstructorChecks(self):
# Invalid arg types.
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
# full_shape and var_offset must have same length.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
# Offset must always be less than shape.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
def testSingleOffset(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(4, partition_info.single_offset([1, 3]))
# Tests when the variable isn't partitioned at all.
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(0, partition_info.single_offset([9, 3]))
def testSingleSliceDim(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
# Invalid shape.
with self.assertRaises(TypeError):
partition_info.single_slice_dim(None)
# Rank of shape differs from full_shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 2, 3])
# Shape is too large given var_offset (4+6 > 9).
with self.assertRaises(ValueError):
partition_info.single_slice_dim([6, 3])
# Multiple possible slice dim from shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 1])
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
if __name__ == "__main__":
test.main()
|
|
"""
Support for simple yaml persisted config dicts.
Usual building of the configuration
- with datadir (default or from option)
- create dir if not available
- create minimal config if not available
- load config
- App is initialized with a (possibly empty) config
which is recursively updated (w/o overriding values) with its own default config
- Services are initialized with app and
recursively update (w/o overriding values) the config with their default config
todo:
datadir
"""
import os
import copy
import click
from devp2p.utils import update_config_with_defaults # updates only missing entries
import errno
import yaml
import ethereum.slogging as slogging
from devp2p.service import BaseService
from devp2p.app import BaseApp
from accounts import mk_random_privkey
from ethereum.keys import decode_hex
from ethereum.utils import parse_int_or_hex, remove_0x_head
CONFIG_FILE_NAME = 'config.yaml'
log = slogging.get_logger('config')
default_data_dir = click.get_app_dir('pyethapp')
def get_config_path(data_dir=default_data_dir):
return os.path.join(data_dir, CONFIG_FILE_NAME)
default_config_path = get_config_path(default_data_dir)
def setup_data_dir(data_dir=None):
config_file_path = get_config_path(data_dir)
if data_dir and not os.path.exists(config_file_path):
try:
os.makedirs(data_dir)
except OSError as ex:
# Ignore "File exists" errors
if ex.errno != errno.EEXIST:
raise
setup_required_config(data_dir)
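# Hedged usage sketch (not part of the original module): the bootstrapping flow from
# the module docstring -- ensure the data dir and a minimal config exist, then load
# it. '/tmp/pyethapp-demo' is a placeholder directory, not a real default.
def _example_bootstrap_config():  # illustrative only, never called here
    data_dir = '/tmp/pyethapp-demo'
    setup_data_dir(data_dir)
    return load_config(get_config_path(data_dir))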
required_config = dict(node=dict(privkey_hex=''))
def check_config(config, required_config=required_config):
"check if values are set"
for k, v in required_config.items():
if not config.get(k):
return False
if isinstance(v, dict):
if not check_config(config[k], v):
return False
return True
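# Hedged example (not part of the original module): check_config only verifies that
# every required key is present and non-empty.
def _example_check_config():  # illustrative only, never called here
    assert check_config(dict(node=dict(privkey_hex='aa')))
    assert not check_config(dict(node=dict(privkey_hex='')))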
def validate_alt_config_file(ctx, param, value):
"""Used as a click.callback
Check if the config file at @value can be used as a pyethapp config.
"""
if value:
try:
yaml_ = load_config(value)
except IOError as e:
raise click.BadParameter(str(e))
else:
if not isinstance(yaml_, dict):
raise click.BadParameter('content of config should be a yaml dictionary')
assert not yaml_.get('eth', {}).get('privkey_hex'), 'eth.privkey_hex found'
return value
def setup_required_config(data_dir=default_data_dir):
"writes minimal necessary config to data_dir"
log.info('setup default config', path=data_dir)
config_path = get_config_path(data_dir)
assert not os.path.exists(config_path)
if not os.path.exists(data_dir):
setup_data_dir(data_dir)
config = dict(node=dict(privkey_hex=mk_random_privkey().encode('hex')))
write_config(config, config_path)
def get_default_config(services):
"collect default_config from services"
config = dict()
assert isinstance(services, list)
for s in services:
assert isinstance(s, (BaseService, BaseApp)) or issubclass(s, (BaseService, BaseApp))
update_config_with_defaults(config, s.default_config)
return config
def load_config(path=default_config_path):
"""Load config from string or file like object `path`."""
log.info('loading config', path=path)
if os.path.exists(path):
if os.path.isdir(path):
path = get_config_path(path)
return yaml.load(open(path))
return dict()
def write_config(config, path=default_config_path):
"""Load config from string or file like object `f`, discarding the one
already in place.
"""
assert path
log.info('writing config', path=path)
with open(path, 'wb') as f:
yaml.dump(config, f)
def set_config_param(config, s, strict=True):
"""Set a specific config parameter.
:param s: a string of the form ``a.b.c=d`` which will set the value of
``config['a']['b']['c']`` to ``yaml.load(d)``
:param strict: if `True`, only existing values may be overridden (unknown
keys raise :exc:`KeyError`)
:raises: :exc:`ValueError` if `s` is malformed or the value to set is not
valid YAML
"""
# fixme add += support
try:
param, value = s.split('=', 1)
keys = param.split('.')
except ValueError:
raise ValueError('Invalid config parameter')
d = config
for key in keys[:-1]:
if strict and key not in d:
raise KeyError('Unknown config option %s' % param)
d = d.setdefault(key, {})
try:
if strict and keys[-1] not in d:
raise KeyError('Unknown config option %s' % param)
d[keys[-1]] = yaml.load(value)
except yaml.parser.ParserError:
raise ValueError('Invalid config value')
return config
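# Hedged example (not part of the original module): setting a nested value with the
# dotted "a.b.c=d" syntax; with the default strict=True the key path must already
# exist in the config.
def _example_set_config_param():  # illustrative only, never called here
    config = dict(node=dict(privkey_hex=''))
    set_config_param(config, 'node.privkey_hex=deadbeef')
    assert config['node']['privkey_hex'] == 'deadbeef'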
def dump_config(config):
"""mask privkey_hex entries in config and print as yaml
"""
konfig = copy.deepcopy(config)
mask = lambda key: "{}{}{}".format(key[:2], "*" * (len(key) - 4), key[-2:])
if len(konfig.get('accounts', {}).get('privkeys_hex', [])):
konfig['accounts']['privkeys_hex'] = [mask(key) for key in konfig['accounts']['privkeys_hex']]
if len(konfig.get('node', {}).get('privkey_hex', '')):
konfig['node']['privkey_hex'] = mask(konfig['node']['privkey_hex'])
print yaml.dump(konfig)
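# Hedged example (not part of the original module): dump_config masks private keys
# before printing, keeping only the first and last two characters, so the 64 character
# hex key below prints as 'aa', 60 asterisks, then 'ff'.
def _example_dump_config():  # illustrative only, never called here
    dump_config(dict(node=dict(privkey_hex='aa' + 'bb' * 30 + 'ff')))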
def update_config_from_genesis_json(config, genesis_json_filename_or_dict):
""" Sets the genesis configuration.
Note: This function will not copy config, it will be modified in place and
then returned.
Args:
config (dict): The app full configuration.
genesis_json_filename_or_dict: The path to a yaml file or a dictionary
with the genesis configuration, the required keys are:
- alloc: a mapping from address to balance
- difficulty: the difficulty hex encoded
- timestamp: the timestamp hex encoded
- extraData: extra binary data hex encoded
- gasLimit: gas limit hex encoded
- mixhash: mixhash hex encoded
- parentHash: the parent hash hex encoded
- coinbase: coinbase hex encoded
- nonce: nonce hex encoded
Returns:
dict: The first function argument.
"""
if isinstance(genesis_json_filename_or_dict, dict):
genesis_dict = genesis_json_filename_or_dict
else:
with open(genesis_json_filename_or_dict, 'r') as genesis_json_file:
genesis_dict = yaml.load(genesis_json_file)
valid_keys = set((
'alloc', 'difficulty', 'timestamp', 'extraData', 'gasLimit', 'mixhash',
'parentHash', 'coinbase', 'nonce',
))
unknown_keys = set(genesis_dict.keys()) - valid_keys
if unknown_keys:
raise ValueError('genesis_dict contains invalid keys.')
config.setdefault('eth', {}).setdefault('block', {})
ethblock_config = config['eth']['block']
def _dec(data):
return decode_hex(remove_0x_head(data))
if 'alloc' in genesis_dict:
ethblock_config['GENESIS_INITIAL_ALLOC'] = genesis_dict['alloc']
if 'difficulty' in genesis_dict:
ethblock_config['GENESIS_DIFFICULTY'] = parse_int_or_hex(genesis_dict['difficulty'])
if 'timestamp' in genesis_dict:
ethblock_config['GENESIS_TIMESTAMP'] = parse_int_or_hex(genesis_dict['timestamp'])
if 'extraData' in genesis_dict:
ethblock_config['GENESIS_EXTRA_DATA'] = _dec(genesis_dict['extraData'])
if 'gasLimit' in genesis_dict:
ethblock_config['GENESIS_GAS_LIMIT'] = parse_int_or_hex(genesis_dict['gasLimit'])
if 'mixhash' in genesis_dict:
ethblock_config['GENESIS_MIXHASH'] = _dec(genesis_dict['mixhash'])
if 'parentHash' in genesis_dict:
ethblock_config['GENESIS_PREVHASH'] = _dec(genesis_dict['parentHash'])
if 'coinbase' in genesis_dict:
ethblock_config['GENESIS_COINBASE'] = _dec(genesis_dict['coinbase'])
if 'nonce' in genesis_dict:
ethblock_config['GENESIS_NONCE'] = _dec(genesis_dict['nonce'])
return config
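# Hedged example (not part of the original module): applying an in-memory genesis
# dict; the hex values below are illustrative only.
def _example_update_from_genesis():  # illustrative only, never called here
    config = {}
    genesis = {
        'difficulty': '0x20000',
        'gasLimit': '0x2fefd8',
        'nonce': '0x0000000000000042',
    }
    return update_config_from_genesis_json(config, genesis)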
|
|
#
# Copyright 2014 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""
Core client functionality, common across all API requests (including performing
HTTP requests).
"""
import base64
import collections
from datetime import datetime
from datetime import timedelta
import hashlib
import hmac
import requests
import random
import time
import googlemaps
try: # Python 3
from urllib.parse import urlencode
except ImportError: # Python 2
from urllib import urlencode
_USER_AGENT = "GoogleGeoApiClientPython/%s" % googlemaps.__version__
_DEFAULT_BASE_URL = "https://maps.googleapis.com"
_RETRIABLE_STATUSES = set([500, 503, 504])
class Client(object):
"""Performs requests to the Google Maps API web services."""
def __init__(self, key=None, client_id=None, client_secret=None,
timeout=None, connect_timeout=None, read_timeout=None,
retry_timeout=60, requests_kwargs=None,
queries_per_second=10):
"""
:param key: Maps API key. Required, unless "client_id" and
"client_secret" are set.
:type key: string
:param client_id: (for Maps API for Work customers) Your client ID.
:type client_id: string
:param client_secret: (for Maps API for Work customers) Your client
secret (base64 encoded).
:type client_secret: string
:param timeout: Combined connect and read timeout for HTTP requests, in
seconds. Specify "None" for no timeout.
:type timeout: int
:param connect_timeout: Connection timeout for HTTP requests, in
seconds. You should specify read_timeout in addition to this option.
Note that this requires requests >= 2.4.0.
:type connect_timeout: int
:param read_timeout: Read timeout for HTTP requests, in
seconds. You should specify connect_timeout in addition to this
option. Note that this requires requests >= 2.4.0.
:type read_timeout: int
:param retry_timeout: Timeout across multiple retriable requests, in
seconds.
:type retry_timeout: int
:param queries_per_second: Number of queries per second permitted.
If the rate limit is reached, the client will sleep for the
appropriate amount of time before it runs the current query.
:type queries_per_second: int
:raises ValueError: when either credentials are missing, incomplete
or invalid.
:raises NotImplementedError: if connect_timeout and read_timeout are
used with a version of requests prior to 2.4.0.
:param requests_kwargs: Extra keyword arguments for the requests
library, which among other things allow for proxy auth to be
implemented. See the official requests docs for more info:
http://docs.python-requests.org/en/latest/api/#main-interface
:type requests_kwargs: dict
"""
if not key and not (client_secret and client_id):
raise ValueError("Must provide API key or enterprise credentials "
"when creating client.")
if key and not key.startswith("AIza"):
raise ValueError("Invalid API key provided.")
self.key = key
if timeout and (connect_timeout or read_timeout):
raise ValueError("Specify either timeout, or connect_timeout " +
"and read_timeout")
if connect_timeout and read_timeout:
# Check that the version of requests is >= 2.4.0
chunks = requests.__version__.split(".")
if int(chunks[0]) < 2 or (int(chunks[0]) == 2 and int(chunks[1]) < 4):
raise NotImplementedError("Connect/Read timeouts require "
"requests v2.4.0 or higher")
self.timeout = (connect_timeout, read_timeout)
else:
self.timeout = timeout
self.client_id = client_id
self.client_secret = client_secret
self.retry_timeout = timedelta(seconds=retry_timeout)
self.requests_kwargs = requests_kwargs or {}
self.requests_kwargs.update({
"headers": {"User-Agent": _USER_AGENT},
"timeout": self.timeout,
"verify": True, # NOTE(cbro): verify SSL certs.
})
self.sent_times = collections.deque("", queries_per_second)
def _get(self, url, params, first_request_time=None, retry_counter=0,
base_url=_DEFAULT_BASE_URL, accepts_clientid=True, extract_body=None):
"""Performs HTTP GET request with credentials, returning the body as
JSON.
:param url: URL path for the request. Should begin with a slash.
:type url: string
:param params: HTTP GET parameters.
:type params: dict or list of key/value tuples
:param first_request_time: The time of the first request (None if no retries
have occurred).
:type first_request_time: datetime.datetime
:param retry_counter: The number of this retry, or zero for first attempt.
:type retry_counter: int
:param base_url: The base URL for the request. Defaults to the Maps API
server. Should not have a trailing slash.
:type base_url: string
:param accepts_clientid: Whether this call supports the client/signature
params. Some APIs require API keys (e.g. Roads).
:type accepts_clientid: bool
:param extract_body: A function that extracts the body from the request.
If the request was not successful, the function should raise a
googlemaps.HTTPError or googlemaps.ApiError as appropriate.
:type extract_body: function
:raises ApiError: when the API returns an error.
:raises Timeout: if the request timed out.
:raises TransportError: when something went wrong while trying to
execute a request.
"""
if not first_request_time:
first_request_time = datetime.now()
elapsed = datetime.now() - first_request_time
if elapsed > self.retry_timeout:
raise googlemaps.exceptions.Timeout()
if retry_counter > 0:
# 0.5 * (1.5 ** i) gives a sleep time that grows by 1.5x per retry,
# starting at 0.5s on the first retry. retry_counter is 1 at that point,
# so subtract one to start the exponent at zero.
delay_seconds = 0.5 * 1.5 ** (retry_counter - 1)
# Jitter this value by 50% and pause.
time.sleep(delay_seconds * (random.random() + 0.5))
authed_url = self._generate_auth_url(url, params, accepts_clientid)
try:
resp = requests.get(base_url + authed_url, **self.requests_kwargs)
except requests.exceptions.Timeout:
raise googlemaps.exceptions.Timeout()
except Exception as e:
raise googlemaps.exceptions.TransportError(e)
if resp.status_code in _RETRIABLE_STATUSES:
# Retry request.
return self._get(url, params, first_request_time, retry_counter + 1,
base_url, accepts_clientid, extract_body)
# Check if the time of the nth previous query (where n is queries_per_second)
# is under a second ago - if so, sleep for the difference.
if self.sent_times and len(self.sent_times) == self.sent_times.maxlen:
elapsed_since_earliest = time.time() - self.sent_times[0]
if elapsed_since_earliest < 1:
time.sleep(1 - elapsed_since_earliest)
try:
if extract_body:
result = extract_body(resp)
else:
result = self._get_body(resp)
self.sent_times.append(time.time())
return result
except googlemaps.exceptions._RetriableRequest:
# Retry request.
return self._get(url, params, first_request_time, retry_counter + 1,
base_url, accepts_clientid, extract_body)
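    # Hedged worked example (not part of the original library): with the backoff
    # above, successive retries sleep roughly 0.5s, 0.75s, 1.125s, 1.69s, ... before
    # jitter, and each delay is then scaled by a random factor in [0.5, 1.5).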
def _get_body(self, resp):
if resp.status_code != 200:
raise googlemaps.exceptions.HTTPError(resp.status_code)
body = resp.json()
api_status = body["status"]
if api_status == "OK" or api_status == "ZERO_RESULTS":
return body
if api_status == "OVER_QUERY_LIMIT":
raise googlemaps.exceptions._RetriableRequest()
if "error_message" in body:
raise googlemaps.exceptions.ApiError(api_status,
body["error_message"])
else:
raise googlemaps.exceptions.ApiError(api_status)
def _generate_auth_url(self, path, params, accepts_clientid):
"""Returns the path and query string portion of the request URL, first
adding any necessary parameters.
:param path: The path portion of the URL.
:type path: string
:param params: URL parameters.
:type params: dict or list of key/value tuples
:rtype: string
"""
# Deterministic ordering through sorting by key.
# Useful for tests, and in the future, any caching.
if type(params) is dict:
params = sorted(params.items())
else:
params = params[:] # Take a copy.
if accepts_clientid and self.client_id and self.client_secret:
params.append(("client", self.client_id))
path = "?".join([path, urlencode_params(params)])
sig = sign_hmac(self.client_secret, path)
return path + "&signature=" + sig
if self.key:
params.append(("key", self.key))
return path + "?" + urlencode_params(params)
raise ValueError("Must provide API key for this API. It does not accept "
"enterprise credentials.")
from googlemaps.directions import directions
from googlemaps.distance_matrix import distance_matrix
from googlemaps.elevation import elevation
from googlemaps.elevation import elevation_along_path
from googlemaps.geocoding import geocode
from googlemaps.geocoding import reverse_geocode
from googlemaps.timezone import timezone
from googlemaps.roads import snap_to_roads
from googlemaps.roads import speed_limits
from googlemaps.roads import snapped_speed_limits
Client.directions = directions
Client.distance_matrix = distance_matrix
Client.elevation = elevation
Client.elevation_along_path = elevation_along_path
Client.geocode = geocode
Client.reverse_geocode = reverse_geocode
Client.timezone = timezone
Client.snap_to_roads = snap_to_roads
Client.speed_limits = speed_limits
Client.snapped_speed_limits = snapped_speed_limits
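# Hedged usage sketch (not part of the original library): construct a client with an
# API key and issue a geocoding request. "AIzaExampleKeyOnly" is a placeholder, not a
# real key, so this function is illustrative and never called here.
def _example_client_usage():
    client = Client(key="AIzaExampleKeyOnly")
    return client.geocode("1600 Amphitheatre Parkway, Mountain View, CA")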
def sign_hmac(secret, payload):
"""Returns a base64-encoded HMAC-SHA1 signature of a given string.
:param secret: The key used for the signature, base64 encoded.
:type secret: string
:param payload: The payload to sign.
:type payload: string
:rtype: string
"""
payload = payload.encode('ascii', 'strict')
secret = secret.encode('ascii', 'strict')
sig = hmac.new(base64.urlsafe_b64decode(secret), payload, hashlib.sha1)
out = base64.urlsafe_b64encode(sig.digest())
return out.decode('utf-8')
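# Hedged example (not part of the original library): signing a request path with a
# base64url-encoded secret; "dGVzdA==" is simply b"test" encoded and is not a real
# client secret.
def _example_sign_hmac():  # illustrative only, never called here
    demo_secret = base64.urlsafe_b64encode(b"test").decode("ascii")  # "dGVzdA=="
    return sign_hmac(demo_secret, "/maps/api/geocode/json?address=Sydney")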
def urlencode_params(params):
"""URL encodes the parameters.
:param params: The parameters
:type params: list of key/value tuples.
"""
# urlencode does not handle unicode strings in Python 2.
# Firstly, normalize the values so they get encoded correctly.
params = [(key, normalize_for_urlencode(val)) for key, val in params]
# Secondly, unquote unreserved chars which are incorrectly quoted
# by urllib.urlencode, causing invalid auth signatures. See GH #72
# for more info.
return requests.utils.unquote_unreserved(urlencode(params))
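# Hedged example (not part of the original library): urlencode_params percent-encodes
# unicode values and leaves unreserved characters such as "~" unescaped in the result.
def _example_urlencode_params():  # illustrative only, never called here
    return urlencode_params([("address", u"Z\u00fcrich"), ("region", "ch~demo")])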
try:
unicode
# NOTE(cbro): `unicode` was removed in Python 3. In Python 3, NameError is
# raised here, and caught below.
def normalize_for_urlencode(value):
"""(Python 2) Converts the value to a `str` (raw bytes)."""
if isinstance(value, unicode):
return value.encode('utf8')
if isinstance(value, str):
return value
return normalize_for_urlencode(str(value))
except NameError:
def normalize_for_urlencode(value):
"""(Python 3) No-op."""
# urlencode in Python 3 handles all the types we are passing it.
return value
|
|
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABCMeta, abstractmethod, abstractproperty
from json import JSONDecodeError
import time
import boto3
from botocore.exceptions import ClientError
import requests
from streamalert.apps.config import AppConfig
from streamalert.apps.batcher import Batcher
from streamalert.shared.logger import get_logger
LOGGER = get_logger(__name__)
def _report_time(func):
"""Decorator that returns the time the wrapped function took to run
This should not be applied to functions where the return value is needed by the caller
Returns:
float: time, in seconds, for which the wrapped function ran
"""
def _wrapper(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
total = time.time() - start
LOGGER.info('[%s] Function executed in %.4f seconds.', func.__name__, total)
return total
return _wrapper
def safe_timeout(func):
"""Try/Except decorator to catch any timeout error raised by requests
Args:
        func (im_func): Function to wrap so that requests that could result
            in a connection or read timeout are safely caught.
"""
def _wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except requests.exceptions.Timeout:
LOGGER.exception('[%s] Request timed out', func.__name__)
return False, None
return _wrapper
class AppIntegration(metaclass=ABCMeta):
"""Base class for all app integrations to be implemented for various services"""
# This _POLL_BUFFER_MULTIPLIER is a multiplier that will be used, along with the time it
# took to perform an API request and forward logs, to determine if there is enough
# time remaining in the execution of this function to perform another request.
# The buffer is also to account for any finalization that must occur, like config
# saving to parameter store and spawning a new Lambda invocation if there are more
# logs to poll for this interval
_POLL_BUFFER_MULTIPLIER = 1.5
# _DEFAULT_REQUEST_TIMEOUT indicates how long the requests library will wait before timing
# out for both get and post requests. This applies to both connection and read timeouts
_DEFAULT_REQUEST_TIMEOUT = 3.05
# _EOF_SECONDS_BUFFER is the end-of-function padding in seconds needed to handle cleanup, etc
_EOF_SECONDS_BUFFER = 2
def __init__(self, event, context):
self._config = AppConfig.load_config(event, context)
self._batcher = Batcher(self._config.function_name, self._config.destination_function)
self._gathered_log_count = 0
self._more_to_poll = False
self._poll_count = 0
self._last_timestamp = 0
self._context = {}
def __str__(self):
return self.type()
@classmethod
@abstractproperty
def service(cls):
"""Get this log's origin service
This should be implemented by all subclasses.
Examples: 'duo', 'google', 'onelogin', 'box', etc
Returns:
str: The originating service name for these logs.
"""
@classmethod
@abstractproperty
def _type(cls):
"""Get the specific type of log for this app
This should be implemented by all subclasses.
Returns:
str: The specific type of log (auth, admin, events etc)
"""
@classmethod
def type(cls):
"""Returns a combination of the service and log type
Returns:
str: The specific type of log (duo_auth, duo_admin, google_admin, etc)
"""
return '_'.join([cls.service(), cls._type()])
@classmethod
def required_auth_info(cls):
"""Public method to get the expected info that this service's auth dict should contain.
This public method calls the protected `_required_auth_info` and then validates its
type to ensure the caller does not get a non-iterable result due to a poor implementation
by a subclass.
Returns:
dict: Required authentication keys, with optional description and
format they should follow
"""
req_auth_info = cls._required_auth_info()
return req_auth_info if isinstance(req_auth_info, dict) else dict()
@classmethod
@abstractmethod
def _required_auth_info(cls):
"""Protected method to get the expected info that this service's auth dict should contain.
This must be implemented by subclasses and provide context as to what authentication
information is required as well as a description of the data and an optional regex
that the data should conform to.
This is called from the public `required_auth_info` method and validated there.
Returns:
dict: Required authentication keys, with optional description and
format they should follow
"""
@abstractmethod
def _gather_logs(self):
"""Get gathered logs from the service
This should be implemented by all subclasses.
Returns:
list or bool: The list of logs fetched from the service, or False if
there was an error during log collection.
"""
@abstractmethod
def _sleep_seconds(self):
"""Get the amount of time this service should sleep before performing another poll.
        This should be implemented by all subclasses and is necessary for some services
to avoid overloading the API with requests.
Returns:
int: Number of seconds the polling function should sleep for
"""
@classmethod
def date_formatter(cls):
"""Returns a format string to assist in formatting dates for this service
Returns:
str: A format string for formatting date/time values (ie: '%Y-%m-%dT%H:%M:%SZ')
"""
def _sleep(self):
"""Function to sleep the looping"""
# Do not sleep if this is the first poll
if self._poll_count == 0:
LOGGER.debug('Skipping sleep for first poll')
return
# Sleep for n seconds so the called API does not return a bad response
sleep_for_secs = self._sleep_seconds()
LOGGER.debug('[%s] Sleeping for %d seconds...', self, sleep_for_secs)
time.sleep(sleep_for_secs)
def _initialize(self):
"""Method for performing any startup steps, like setting state to running"""
# Perform another safety check to make sure this is not being invoked already
if self._config.is_running:
LOGGER.warning('[%s] App already running', self)
return False
# Check if this is an invocation spawned from a previous partial execution
# Return if the config is marked as 'partial' but the invocation type is wrong
if not self._config.is_successive_invocation and self._config.is_partial:
LOGGER.error('[%s] App in partial execution state, exiting', self)
return False
LOGGER.info('[%s] Starting app', self)
LOGGER.info('App executing as a successive invocation: %s',
self._config.is_successive_invocation)
# Validate the auth in the config. This raises an exception upon failure
self._config.validate_auth(set(self.required_auth_info()))
self._config.set_starting_timestamp(self.date_formatter())
self._last_timestamp = self._config.last_timestamp
self._context = self._config.context
# Mark this app as running, which updates the parameter store
self._config.mark_running()
return True
def _finalize(self):
"""Method for performing any final steps, like saving applicable state
This function is also used to invoke a new copy of this lambda in the case
that there are more logs available to collect.
"""
if not self._last_timestamp:
LOGGER.error('Ending last timestamp is 0. This should not happen and is likely '
'due to the subclass not setting this value.')
if self._last_timestamp == self._config.start_last_timestamp:
LOGGER.info('Ending last timestamp is the same as the beginning last timestamp. '
'This could occur if there were no logs collected for this execution.')
LOGGER.info('[%s] App complete; gathered %d logs in %d polls.',
self, self._gathered_log_count, self._poll_count)
self._config.last_timestamp = self._last_timestamp
self._config.context = self._context
# If there are more logs to poll, invoke this app function again and mark
# the config as 'partial'. Marking the state as 'partial' prevents
# scheduled function invocations from running alongside chained invocations.
if self._more_to_poll:
self._config.mark_partial()
self._invoke_successive_app()
return
self._config.mark_success()
def _invoke_successive_app(self):
"""Invoke a successive app function to handle more logs
This is useful when there were more logs to collect than could be accomplished
in this execution. Instead of marking the config with 'success' and waiting
for the next scheduled execution, this will invoke the lambda again with an
'event' indicating there are more logs to collect. Other scheduled executions
will not have an 'event' to allow for this type of override, and will exit
when checking the 'self._config.is_running' property. This allows for chained
invocations without the worry of duplicated effort or collisions.
"""
lambda_client = boto3.client('lambda')
try:
response = lambda_client.invoke(
FunctionName=self._config.function_name,
InvocationType='Event',
Payload=self._config.successive_event,
Qualifier=self._config.function_version
)
except ClientError as err:
LOGGER.error('An error occurred while invoking a subsequent app function '
'(\'%s:%s\'). Error is: %s',
self._config.function_name,
self._config.function_version,
err.response)
raise
LOGGER.info('Invoking successive apps function \'%s\' with Lambda request ID \'%s\'',
self._config.function_name,
response['ResponseMetadata']['RequestId'])
def _check_http_response(self, response):
"""Method for checking for a valid HTTP response code
Returns:
bool: Indicator of whether or not this request was successful
"""
success = response is not None and (200 <= response.status_code <= 299)
if not success:
LOGGER.error(
'[%s] HTTP request failed: [%d] %s', self, response.status_code, response.content
)
return success
@safe_timeout
def _make_get_request(self, full_url, headers, params=None):
"""Method for returning the json loaded response for this GET request
Returns:
            tuple (bool, dict): False if there was an error performing the request,
and the dictionary loaded from the json response
"""
LOGGER.debug('[%s] Making GET request on poll #%d', self, self._poll_count)
# Perform the request and return the response as a dict
response = requests.get(full_url, headers=headers,
params=params, timeout=self._DEFAULT_REQUEST_TIMEOUT)
return self._check_http_response(response), response.json()
@safe_timeout
def _make_post_request(self, full_url, headers, data, is_json=True):
"""Method for returning the json loaded response for this POST request
Returns:
tuple (bool, dict|None): The first return value will be False if there
was an error performing the request.
The second return value will be None if JSONDecodeError raised,
otherwise it will be the dictionary loaded from the json response.
"""
LOGGER.debug('[%s] Making POST request on poll #%d', self, self._poll_count)
# Perform the request and return the response as a dict
if is_json:
response = requests.post(full_url, headers=headers,
json=data, timeout=self._DEFAULT_REQUEST_TIMEOUT)
else:
# if content type is form-encoded, the param is 'data' rather than 'json'
response = requests.post(full_url, headers=headers,
data=data, timeout=self._DEFAULT_REQUEST_TIMEOUT)
try:
return self._check_http_response(response), response.json()
except JSONDecodeError:
# https://github.com/airbnb/streamalert/issues/998
            # When the response is a Gateway Timeout (status_code 504), the response
            # body is an empty string and calling .json() raises a JSONDecodeError.
# See https://github.com/psf/requests/blob/v2.22.0/requests/models.py#L853
# Instead of raising exception, we can just return False, None
return False, None
@_report_time
def _gather(self):
"""Protected entry point to perform the gather that returns the time the process took
Returns:
float: time, in seconds, for which the function ran
"""
# Make this request sleep if the API throttles requests
self._sleep()
# Increment the poll count
self._poll_count += 1
logs = self._gather_logs()
        # Make sure there are logs; this can be False if there was an issue polling
        # or if there are no new logs to be polled
if not logs:
self._more_to_poll = False
LOGGER.error('[%s] Gather process was not able to poll any logs '
'on poll #%d', self, self._poll_count)
return
# Increment the count of logs gathered
self._gathered_log_count += len(logs)
# Utilize the batcher to send logs to the classifier function
self._batcher.send_logs(logs)
LOGGER.debug('Updating config last timestamp from %s to %s',
self._config.last_timestamp, self._last_timestamp)
# Save the config's last timestamp after each function run
self._config.last_timestamp = self._last_timestamp
def gather(self):
"""Public method for actual gathering of logs"""
# Initialize the app, saving state to 'running'
if not self._initialize():
return
try:
# Add a 50% buffer to the time it took to account for some unforeseen delay and to give
# this function enough time to spawn a new invocation if there are more logs to poll
while (((self._gather() * self._POLL_BUFFER_MULTIPLIER) + self._sleep_seconds()) <
self._remaining_seconds):
LOGGER.debug('[%s] More logs to poll: %s', self, self._more_to_poll)
self._config.report_remaining_seconds()
if not self._more_to_poll:
break
# Reset the boolean indicating that there is more data to poll. Subclasses should
# set this to 'True' within their implementation of the '_gather_logs' function
self._more_to_poll = not self._more_to_poll
LOGGER.debug('[%s] Gathered all logs possible for this execution. More logs to poll: '
'%s', self, self._more_to_poll)
self._config.report_remaining_seconds()
# Finalize, saving state to 'succeeded'
self._finalize()
finally:
# Make sure the config is not left marked as running, which could be problematic
if self._config and self._config.is_running:
self._config.mark_failure()
@property
def _remaining_seconds(self):
return (self._config.remaining_ms() / 1000.0) - self._EOF_SECONDS_BUFFER
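# --- Illustrative sketch (hypothetical; not a shipped StreamAlert app) -----
# A concrete app only has to fill in the abstract hooks that the polling loop
# above drives. Every name and endpoint below is made up, and the
# `self._config.auth` access assumes AppConfig exposes the validated auth
# values as a dict named `auth`.
class _ExampleApp(AppIntegration):
    """Minimal sketch of an AppIntegration subclass"""

    _EXAMPLE_ENDPOINT = 'https://api.example.invalid/v1/logs'  # hypothetical

    @classmethod
    def service(cls):
        return 'example'

    @classmethod
    def _type(cls):
        return 'audit'

    @classmethod
    def _required_auth_info(cls):
        return {'api_token': {'description': 'API token used to authenticate'}}

    @classmethod
    def date_formatter(cls):
        return '%Y-%m-%dT%H:%M:%SZ'

    def _sleep_seconds(self):
        return 0  # this hypothetical API does not throttle requests

    def _gather_logs(self):
        headers = {'Authorization': self._config.auth['api_token']}
        success, response = self._make_get_request(
            self._EXAMPLE_ENDPOINT, headers,
            params={'since': self._last_timestamp})
        if not success or not response:
            return False
        logs = response.get('events', [])
        if logs:
            # Advance the checkpoint so the next poll starts where this ended
            self._last_timestamp = logs[-1]['timestamp']
            self._more_to_poll = bool(response.get('has_more'))
        return logs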
|
|
"""Various implementations of the k-medoids algorithm."""
import numpy as np
from numba import jit
from clustering.kmedoids_helper import _get_clusters, _get_medoid
from clustering.metrics import _dissimilarity_matrix
def pam(data, k, metric=None, method='memory'):
"""Partitioning Around Medoids, a realization of k-medoids.
Parameters
----------
data : (n,) ndarray or (n, n) ndarray
Data set or dissimilarity matrix.
k : int
Number of desired clusters.
metric : function, optional
Function to compute pairwise distances.
method : {'memory', 'hybrid', 'cpu'}
Implementation to use.
Returns
-------
clusterid : (n,) ndarray
An array containing the number of the cluster to which each object was
assigned, where the cluster number is defined as the object number of
the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Algorithm
---------
1. Arbitrarily choose k data objects as the initial medoids.
2. Assign every data object to its closest medoid.
3. Recompute the medoid for every cluster as the member of the
cluster that minimizes the sum of distances with respect to all the
other members.
4. If medoids have changed, go back to step 2.
Notes
-----
    The three methods differ in the way clusters and medoids are computed.
* 'memory' (all-memory): Uses the dissimilarity matrix. It is very fast,
since all operations are done on numpy arrays by numpy functions (highly
optimized).
* 'hybrid': Computes distances when needed, without needing to store the
whole dissimilarity matrix. Uses less memory than 'memory' and still
uses numpy functions, but needs to compute distances anew every time and
that adds some additional computation time.
    * 'cpu' (all-computations): Computes distances directly and does not
      resort to numpy functions. It may be very slow, but it can also work
      with huge datasets.
    With huge data sets, however, it is usually better to use CLARA or
    CLARANS, which deal with the problem in a reasonable time and still give
    very good results.
References
----------
.. Leonard Kaufman, Peter J. Rousseeuw, "Finding Groups in Data. An
Introduction to Cluster Analysis"
"""
# choose the right implementation (all-memory, hybrid, all-computations)
get_clusters = _get_clusters(metric, method)
get_medoid = _get_medoid(metric, method)
n = data.shape[0]
# step 1
# arbitrarily choose k data objects as the initial medoids
medoids = np.random.choice(n, k, replace=False)
while True:
changed = False
# step 2
# assign every data object to its closest medoid
clusterid, _ = get_clusters(data, medoids)
# make sure every medoid stays in its cluster, otherwise there will be
# problems with dissimilarities matrix with zeros outside the diagonal
clusterid[medoids] = medoids
# step 3
# recompute the medoid for every cluster as the member of the
# cluster that minimizes the sum of distances with respect to all the
# other members
new_medoids = np.copy(medoids)
for i, medoid in enumerate(medoids):
cluster = np.where(clusterid == medoid)[0]
new_medoid = get_medoid(data, cluster)
if medoid != new_medoid:
changed = True
new_medoids[i] = new_medoid
# step 4
# if all the medoids have not changed, we reached convergence and hence
# the algorithm has finished
if not changed:
break
else:
medoids = np.copy(new_medoids)
return get_clusters(data, medoids)
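# --- Illustrative sketch (independent of the helpers above) ----------------
# A deliberately small, numpy-only version of the PAM loop described in the
# docstring (steps 1-4), operating directly on a precomputed dissimilarity
# matrix. The library implementation above is faster and more general.
def _toy_pam(diss, k):
    n = diss.shape[0]
    medoids = np.random.choice(n, k, replace=False)                  # step 1
    while True:
        # step 2: assign every object to its closest medoid
        clusterid = medoids[np.argmin(diss[:, medoids], axis=1)]
        clusterid[medoids] = medoids
        # step 3: the new medoid of a cluster is the member minimizing the
        # sum of distances to all other members
        new_medoids = np.array([
            members[np.argmin(diss[np.ix_(members, members)].sum(axis=1))]
            for members in (np.where(clusterid == m)[0] for m in medoids)
        ])
        # step 4: stop when the set of medoids no longer changes
        if np.array_equal(np.sort(new_medoids), np.sort(medoids)):
            break
        medoids = new_medoids
    error = diss[np.arange(n), clusterid].sum()
    return clusterid, error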
def pam_npass(data, k, metric=None, method='memory', npass=1):
"""Partitioning Around Medoids, a realization of k-medoids.
Parameters
----------
data : (n,) ndarray or (n, n) ndarray
Data set or dissimilarity matrix.
k : int
Number of desired clusters.
metric : function, optional
Function to compute pairwise distances.
method : {'memory', 'hybrid', 'cpu'}
Implementation to use.
npass : int, optional
The number of times the k-medoids clustering algorithm is performed,
each time with a different (random) initial condition.
Returns
-------
clusterid : (n,) ndarray
An array containing the number of the cluster to which each object was
assigned, where the cluster number is defined as the object number of
the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
nfound : int
The number of times the optimal solution was found.
"""
# repeat the k-medoids algorithm npass times and select the best
clusterid = -1
error = np.inf
nfound = 0
for _ in range(npass):
new_clusterid, new_error = pam(data, k, metric, method)
if new_error < error:
# we found a better solution
error = new_error
clusterid = new_clusterid
nfound = 1
else:
if np.array_equal(clusterid, new_clusterid):
# we found another optimal solution
nfound += 1
return clusterid, error, nfound
def clara(data, k, metric, samples=5, sampsize=None):
"""Clustering LARge Applications.
A simple way to extend PAM for larger data sets.
Parameters
----------
data : (n,) ndarray
Data set.
k : int
Number of desired clusters.
metric : function
Function to compute pairwise distances.
samples : int, optional
Number of samples to be drawn from the data set. The default, 5, is
        rather small for historical reasons; we recommend setting samples an
        order of magnitude larger.
sampsize : int, optional
Number of objects in each sample. sampsize should be higher than the
number of clusters (k) and at most the total number of objects.
Returns
-------
clusterid : (n,) ndarray
An array containing the number of the cluster to which each object was
assigned, where the cluster number is defined as the object number of
the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Algorithm
---------
1. Draw a sample of ``sampsize`` objects randomly from the entire data set.
2. Apply PAM clustering to such sample.
3. Associate every object of the entire data set to the nearest of the k
medoids just found.
4. Compute the within-cluster sum of distance (error) of the clustering
obtained in the previous step. If this error is lower than the current
minimum, retain the k medoids found in step 2 as the best set so far.
5. Repeat step 1-4 ``samples`` times.
Notes
-----
    This algorithm makes it possible to cluster very large data sets. However,
    the sample drawn at each iteration might not be representative of the
    data, so sampsize and samples should be chosen accordingly.
References
----------
.. Leonard Kaufman, Peter J. Rousseeuw, "Finding Groups in Data. An
Introduction to Cluster Analysis"
"""
# choose which implementation to use, hybrid or cpu
get_clusters = _get_clusters(metric, method='cpu')
dissimilarity_matrix = _dissimilarity_matrix(metric)
n = data.shape[0]
data = np.array(data)
if not sampsize:
# set the default sampsize value
sampsize = min(40 + 2*k, n)
error = np.inf
clusterid = np.empty(n, dtype=np.uint32)
for _ in range(samples):
# step 1
# draw a sample as a random subset of the original dataset
subset = np.random.choice(n, sampsize, replace=False)
# step 2
# compute the dissimilarity matrix of the sample and apply PAM
diss = dissimilarity_matrix(data[subset])
partial_clusterid = pam(diss, k)[0]
medoids = subset[np.unique(partial_clusterid)]
# step 3 and 4
# associate each object of the data set to the nearest medoid and
# compute the error of the clustering
new_clusterid, new_error = get_clusters(data, medoids)
# keep the new clustering only if it has a lower error
if new_error < error:
error = new_error
clusterid = new_clusterid
return clusterid, error
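# --- Illustrative sketch (independent of the helpers above) ----------------
# The CLARA loop in the docstring amounts to: repeatedly run PAM on a small
# random sample, score the sampled medoids against the full data set, and
# keep the best-scoring set. A numpy-only version, reusing the `_toy_pam`
# sketch defined earlier in this file and a user-supplied metric:
def _toy_clara(data, k, metric, samples=5, sampsize=None):
    n = data.shape[0]
    sampsize = sampsize or min(40 + 2 * k, n)
    best_clusterid, best_error = None, np.inf
    for _ in range(samples):
        # step 1: draw a random sample and build its dissimilarity matrix
        subset = np.random.choice(n, sampsize, replace=False)
        diss = np.array([[metric(a, b) for b in data[subset]]
                         for a in data[subset]])
        # step 2: cluster the sample with PAM
        sub_clusterid, _ = _toy_pam(diss, k)
        medoids = subset[np.unique(sub_clusterid)]
        # steps 3-4: assign the whole data set to the sampled medoids and
        # keep the medoid set with the lowest total error
        dists = np.array([[metric(x, data[m]) for m in medoids]
                          for x in data])
        clusterid = medoids[np.argmin(dists, axis=1)]
        error = dists.min(axis=1).sum()
        if error < best_error:
            best_clusterid, best_error = clusterid, error
    return best_clusterid, best_error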
def _clarans(metric):
"""Clustering Large Applications based on RANdomized Search."""
# choose which implementation to use, hybrid or cpu
get_clusters = _get_clusters(metric, method='cpu')
@jit(nopython=True)
def clarans(data, k, numlocal, maxneighbor):
"""Clustering Large Applications based on RANdomized Search.
Parameters
----------
data : (n,) ndarray
Data set.
k : int
Number of desired clusters.
metric : function
Function to compute pairwise distances.
numlocal : int
Number of times to repeat the search for other local minima.
maxneighbor : int
Maximum number of the neighbors to look at.
Returns
-------
clusterid : (n,) ndarray
An array containing the number of the cluster to which each object
was assigned, where the cluster number is defined as the object
number of the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Algorithm
---------
1. Choose an arbitrary node from the data set.
2. Consider a random neighbor of the current node.
3. If the random neighbor has a lower error than the current node, set
it as the current node.
4. Repeat step 2-3 ``maxneighbor`` times.
5. Repeat step 1-4 ``numlocal`` times and retain the best clustering.
Notes
-----
The best way to explain CLARANS is via a graph abstraction. In fact,
the process of finding k medoids can be viewed abstractly as searching
through a certain graph. In this graph, a set of k objects is called
node. Two nodes are neighbors if their sets differ by only one object.
        Since a node represents a collection of k objects, those objects can
        be seen as medoids and hence induce a clustering.
Each node can be assigned an error that is defined to be the total
dissimilarity (i.e. sum of distances) between every object and the
medoid of its cluster.
References
----------
.. R.T. Ng, Jiawei Han, "CLARANS: a method for clustering objects for
spatial data mining"
"""
n = data.shape[0]
choices = np.arange(n)
best_medoids = np.empty(k, dtype=np.uint32)
best_error = np.inf
min_dist = 0
for _ in range(numlocal):
# step 1
# choose an arbitrary node as starting medoids and compute its
# error
medoids = np.empty(k, dtype=np.uint32)
for i in range(k):
np.random.shuffle(choices)
medoids[i] = choices[-1]
choices = choices[:-1]
error = 0
for i in range(n):
min_dist = np.inf
for med in medoids:
dist = metric(data[i], data[med])
if dist < min_dist:
min_dist = dist
error += min_dist
for _ in range(maxneighbor):
# step 2
# find a random neighbor, i.e. change only one of the medoids
# with a random object (that is not already a medoid) of the
# whole data set
random_neigh = np.copy(medoids)
np.random.shuffle(choices)
non_med = choices[-1]
non_med_i = np.random.choice(k)
random_neigh[non_med_i] = non_med
# step 3
# compute the error of the random neighbor and compare it with
# the current node (i.e. current medoids)
new_error = 0
for i in range(n):
min_dist = np.inf
for med in random_neigh:
dist = metric(data[i], data[med])
if dist < min_dist:
min_dist = dist
new_error += min_dist
# choose the induced clustering with lower error
if new_error < error:
error = new_error
choices[-1] = medoids[non_med_i]
medoids = random_neigh
# retain the clustering solution with the lowest error
if error < best_error:
best_error = error
best_medoids = medoids
return get_clusters(data, best_medoids)
return clarans
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=g-classes-have-attributes
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import copy
import warnings
from keras import backend
from keras.engine import base_layer_v1 as base_layer
from keras.engine import base_layer_utils
from keras.legacy_tf_layers import variable_scope_shim
from keras.mixed_precision import policy
from keras.utils import tf_contextlib
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
_KERAS_STYLE_SCOPE = False
@keras_export(
v1=['keras.__internal__.legacy.layers.experimental.keras_style_scope'])
@tf_export(v1=['layers.experimental.keras_style_scope'])
@tf_contextlib.contextmanager
def keras_style_scope():
"""Use Keras-style variable management.
All tf.layers and tf RNN cells created in this scope use Keras-style
variable management. Creating such layers with a scope= argument is
disallowed, and reuse=True is disallowed.
The purpose of this scope is to allow users of existing layers to
slowly transition to a Keras layers API without breaking existing
functionality.
One example of this is when using TensorFlow's RNN classes with Keras
Models or Networks. Because Keras models do not properly set variable
scopes, users of RNNs may either accidentally share scopes between two
different models, or get errors about variables that already exist.
Example:
```python
class RNNModel(tf.keras.Model):
def __init__(self, name):
super(RNNModel, self).__init__(name=name)
self.rnn = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
[tf.compat.v1.nn.rnn_cell.LSTMCell(64) for _ in range(2)])
def call(self, input, state):
return self.rnn(input, state)
model_1 = RNNModel("model_1")
model_2 = RNNModel("model_2")
# OK
output_1, next_state_1 = model_1(input, state)
# Raises an error about trying to create an already existing variable.
output_2, next_state_2 = model_2(input, state)
```
The solution is to wrap the model construction and execution in a keras-style
scope:
```python
with keras_style_scope():
model_1 = RNNModel("model_1")
model_2 = RNNModel("model_2")
# model_1 and model_2 are guaranteed to create their own variables.
output_1, next_state_1 = model_1(input, state)
output_2, next_state_2 = model_2(input, state)
assert len(model_1.weights) > 0
assert len(model_2.weights) > 0
assert(model_1.weights != model_2.weights)
```
Yields:
A keras layer style scope.
"""
global _KERAS_STYLE_SCOPE
stack = _KERAS_STYLE_SCOPE
_KERAS_STYLE_SCOPE = True
try:
yield
finally:
_KERAS_STYLE_SCOPE = stack
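# --- Illustrative sketch (independent of TensorFlow/Keras) -----------------
# keras_style_scope above is a save/set/restore toggle on a module-level
# flag: stash the current value, flip it on, and restore it in `finally` so
# the flag unwinds correctly even if the body raises. The bare pattern, with
# hypothetical names:
import contextlib

_EXAMPLE_FLAG = False


@contextlib.contextmanager
def _example_flag_scope():
  global _EXAMPLE_FLAG
  saved = _EXAMPLE_FLAG
  _EXAMPLE_FLAG = True
  try:
    yield
  finally:
    _EXAMPLE_FLAG = saved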
@keras_export(
v1=['keras.__internal__.legacy.layers.experimental.set_keras_style'])
@tf_export(v1=['layers.experimental.set_keras_style'])
def set_keras_style():
"""Use Keras-style variable management.
  All tf.layers and tf RNN cells created after keras style has been enabled
use Keras-style variable management. Creating such layers with a
scope= argument is disallowed, and reuse=True is disallowed.
The purpose of this function is to allow users of existing layers to
slowly transition to Keras layers API without breaking existing
functionality.
For more details, see the documentation for `keras_style_scope`.
Note, once keras style has been set, it is set globally for the entire
program and cannot be unset.
Example:
```python
set_keras_style()
model_1 = RNNModel(name="model_1")
model_2 = RNNModel(name="model_2")
# model_1 and model_2 are guaranteed to create their own variables.
output_1, next_state_1 = model_1(input, state)
output_2, next_state_2 = model_2(input, state)
assert len(model_1.weights) > 0
assert len(model_2.weights) > 0
assert(model_1.weights != model_2.weights)
```
"""
global _KERAS_STYLE_SCOPE
_KERAS_STYLE_SCOPE = True
def _is_in_keras_style_scope():
global _KERAS_STYLE_SCOPE
return _KERAS_STYLE_SCOPE
@keras_export(v1=['keras.__internal__.legacy.layers.Layer'])
@tf_export(v1=['layers.Layer'])
class Layer(base_layer.Layer):
"""Base layer class.
It is considered legacy, and we recommend the use of `tf.keras.layers.Layer`
instead.
Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
Read-only properties:
name: The name of the layer (string).
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
trainable_variables: List of trainable variables.
non_trainable_variables: List of non-trainable variables.
variables: List of all variables of this layer, trainable and
non-trainable.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
Mutable properties:
trainable: Whether the layer should be trained (boolean).
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
"""
def __init__(self, trainable=True, name=None, dtype=None,
**kwargs):
# For backwards compatibility, legacy layers do not use `ResourceVariable`
# by default.
self._use_resource_variables = False
scope = kwargs.pop('_scope', None)
self._reuse = kwargs.pop('_reuse', None)
# Avoid an incorrect lint error
self._trainable_weights = []
self.built = False
if dtype is None:
# Indicates to infer dtype from inputs. When the V2 dtype behavior is
# enabled, Keras layers default their dtype to floatx instead, so we pass
# an "_infer" policy to keep the old V1 behavior.
dtype = policy.Policy('_infer')
if 'autocast' not in kwargs:
kwargs['autocast'] = False
# Mark that legacy layers should not be instrumented as Keras usage
self._disable_keras_instrumentation = True
super(Layer, self).__init__(trainable=trainable, name=name, dtype=dtype,
**kwargs)
if _is_in_keras_style_scope():
if scope is not None:
raise ValueError(
'scope argument not allowed when keras style layers are enabled, '
'but saw: {}'.format(scope))
if self._reuse is not None:
raise ValueError(
'reuse argument not allowed when keras style layers are enabled, '
'but saw: {}'.format(self._reuse))
self._keras_style = True
else:
self._keras_style = False
self._call_has_scope_arg = 'scope' in self._call_fn_args
if scope:
with tf.compat.v1.variable_scope(scope) as captured_scope:
self._scope = captured_scope
else:
self._scope = None
self._current_scope = None
def apply(self, *args, **kwargs):
return self(*args, **kwargs)
# We no longer track graph in tf.layers layers. This property is only kept to
# maintain API backward compatibility.
@property
def graph(self):
warnings.warn(
'`Layer.graph` is deprecated and '
'will be removed in a future version. '
'Please stop using this property because tf.layers layers no '
'longer track their graph.',
stacklevel=2)
if tf.executing_eagerly():
raise RuntimeError('Layer.graph not supported when executing eagerly.')
return None
def _init_set_name(self, name):
# Determine layer name (non-unique).
if isinstance(name, tf.compat.v1.VariableScope):
base_name = name.name
self._name, _ = self._make_unique_name()
else:
base_name = name
self._name = name
if not name:
self._name, base_name = self._make_unique_name()
self._base_name = base_name
def _make_unique_name(self, name_uid_map=None, avoid_names=None,
namespace='', zero_based=False):
base_name = base_layer.to_snake_case(self.__class__.__name__)
name = backend.unique_object_name(
base_name,
name_uid_map=name_uid_map,
avoid_names=avoid_names,
namespace=namespace,
zero_based=zero_based)
return (name, base_name)
@property
def scope_name(self):
if not self._scope:
raise ValueError('No name available for layer scope because the layer "' +
self._name + '" has not been used yet. The scope name ' +
' is determined the first time the layer instance is ' +
'called. You must therefore call the layer before ' +
'querying `scope_name`.')
return self._scope.name
def add_loss(self, losses, inputs=None):
previous_losses_length = len(self._losses)
previous_callable_losses_length = len(self._callable_losses)
super(Layer, self).add_loss(losses, inputs=inputs)
if not tf.executing_eagerly():
# TODO(fchollet): deprecate collection below.
new_losses = self._losses[previous_losses_length:]
new_callable_losses = self._callable_losses[
previous_callable_losses_length:]
for regularizer in new_callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
new_losses.append(loss_tensor)
_add_elements_to_collection(
new_losses,
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
def _name_scope(self): # pylint: disable=method-hidden
"""Determines op naming for the Layer."""
if self._keras_style:
return super(Layer, self)._name_scope()
return self._current_scope.original_name_scope
def _set_scope(self, scope=None):
if self._scope is None:
# If constructed with _scope=None, lazy setting of scope.
if self._reuse:
with tf.compat.v1.variable_scope(
scope if scope is not None else self._base_name) as captured_scope:
self._scope = captured_scope
else:
with tf.compat.v1.variable_scope(
scope, default_name=self._base_name) as captured_scope:
self._scope = captured_scope
def add_weight(self,
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
constraint=None,
use_resource=None,
synchronization=tf.VariableSynchronization.AUTO,
aggregation=tf.compat.v1.VariableAggregation.NONE,
partitioner=None,
**kwargs):
"""Adds a new variable to the layer, or gets an existing one; returns it.
Args:
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: initializer instance (callable).
regularizer: regularizer instance (callable).
trainable: whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
Note, if the current variable scope is marked as non-trainable
then this parameter is ignored and any added variables are also
marked as non-trainable. `trainable` defaults to `True` unless
`synchronization` is set to `ON_READ`.
constraint: constraint instance (callable).
use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
partitioner: (optional) partitioner instance (callable). If
provided, when the requested variable is created it will be split
into multiple partitions according to `partitioner`. In this case,
an instance of `PartitionedVariable` is returned. Available
partitioners include `tf.compat.v1.fixed_size_partitioner` and
`tf.compat.v1.variable_axis_size_partitioner`. For more details, see
the documentation of `tf.compat.v1.get_variable` and the "Variable
Partitioners and Sharding" section of the API guide.
**kwargs: Additional keyword arguments.
Returns:
The created variable. Usually either a `Variable` or `ResourceVariable`
instance. If `partitioner` is not `None`, a `PartitionedVariable`
instance is returned.
Raises:
RuntimeError: If called with partitioned variable regularization and
eager execution is enabled.
ValueError: When trainable has been set to True with synchronization
set as `ON_READ`.
"""
for kwarg in kwargs:
if kwarg != 'experimental_autocast':
raise TypeError('Unknown keyword argument:', kwarg)
if self._keras_style:
return super(Layer, self).add_weight(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable and self.trainable,
constraint=constraint,
use_resource=use_resource,
synchronization=tf.VariableSynchronization.AUTO,
aggregation=tf.compat.v1.VariableAggregation.NONE,
partitioner=partitioner,
**kwargs)
if synchronization == tf.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.')
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
def _should_add_regularizer(variable, existing_variable_set):
if base_layer_utils.is_split_variable(variable):
for var in variable:
if var in existing_variable_set:
return False
return True
else:
return variable not in existing_variable_set
init_graph = None
if not tf.executing_eagerly():
default_graph = tf.compat.v1.get_default_graph()
if default_graph.building_function:
with tf.init_scope():
# Retrieve the variables from the graph into which variables
# will be lifted; if initialization ops will be lifted into
# the eager context, then there is nothing to retrieve, since variable
# collections are not supported when eager execution is enabled.
if not tf.executing_eagerly():
init_graph = tf.compat.v1.get_default_graph()
existing_variables = set(tf.compat.v1.global_variables())
else:
# Initialization ops will not be lifted out of the default graph.
init_graph = default_graph
existing_variables = set(tf.compat.v1.global_variables())
if dtype is None:
dtype = self.dtype or tf.float32
self._set_scope(None)
reuse = self.built or self._reuse
prev_len_trainable = len(self._trainable_weights)
with tf.compat.v1.variable_scope(
self._scope, reuse=reuse, auxiliary_name_scope=False) as scope:
self._current_scope = scope
with backend.name_scope(self._name_scope()): # pylint: disable=not-callable
use_resource = (use_resource or
self._use_resource_variables or
scope.use_resource)
if initializer is None:
initializer = scope.initializer
variable = super(Layer, self).add_weight(
name,
shape,
dtype=tf.as_dtype(dtype),
initializer=initializer,
trainable=trainable and self.trainable,
constraint=constraint,
partitioner=partitioner,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
getter=tf.compat.v1.get_variable,
**kwargs)
if regularizer:
if (tf.compat.v1.executing_eagerly_outside_functions()
or _should_add_regularizer(variable, existing_variables)):
self._handle_weight_regularization(name, variable, regularizer)
var_store = vs._get_default_variable_store() # pylint: disable=protected-access
# When the shim to get variable scope working in TF2 is used,
# We need to explicitly make the shim track the regularization
# losses as the collections will not be accessible.
if hasattr(var_store, 'add_regularizer'):
var_store.add_regularizer(variable, regularizer)
if init_graph is not None:
# Handle edge case where a custom getter has overridden `trainable`.
# There is one known occurrence of this, in unit test
# testBasicRNNCellNotTrainable in
# contrib.rnn.python.kernel_tests.core_rnn_cell_test
with init_graph.as_default():
trainable_variables = tf.compat.v1.trainable_variables()
if (trainable and self.trainable and
variable not in trainable_variables):
# A custom getter / variable scope overrode the trainable flag.
extra_trainable_vars = self._trainable_weights[prev_len_trainable:]
self._trainable_weights = self._trainable_weights[
:prev_len_trainable]
self._non_trainable_weights += extra_trainable_vars
return variable
def __call__(self, inputs, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Args:
inputs: input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
**Note**: kwarg `scope` is reserved for use by the layer.
Returns:
Output tensor(s).
Note:
- If the layer's `call` method takes a `scope` keyword argument,
this argument will be automatically set to the current variable scope.
- If the layer's `call` method takes a `mask` argument (as some Keras
layers do), its default value will be set to the mask generated
        for `inputs` by the previous layer (if `inputs` did come from
        a layer that generated a corresponding mask, i.e. if it came from
        a Keras layer with masking support).
Raises:
ValueError: if the layer's `call` method returns None (an invalid value).
"""
scope = kwargs.pop('scope', None)
if self._keras_style:
if scope is not None:
raise ValueError(
'scope argument not allowed when keras style layers are enabled, '
'but saw: {}'.format(scope))
return super(Layer, self).__call__(inputs, *args, **kwargs)
self._set_scope(scope)
if self.built:
try:
# Some classes which inherit from Layer do not use its constructor, so
# rather than initializing to None we check for an AttributeError.
scope_context_manager = self._always_reuse_variable_scope # pylint: disable=access-member-before-definition
except AttributeError:
scope_context_manager = None
if scope_context_manager is None:
# From this point we will always set reuse=True, so create a "final"
# variable scope with this setting. We avoid re-creating variable scopes
# after this point as an optimization.
scope_context_manager = tf.compat.v1.variable_scope(
self._scope, reuse=True, auxiliary_name_scope=False)
# Do not cache variable scopes if Eager mode is enabled. If Eager mode
# is enabled then we don't want to reuse scopes because the cached scope
# might be from a FuncGraph or Eager scope we are no longer in.
if not tf.compat.v1.executing_eagerly_outside_functions():
self._always_reuse_variable_scope = scope_context_manager
else:
scope_context_manager = tf.compat.v1.variable_scope(
self._scope, reuse=self._reuse, auxiliary_name_scope=False)
with scope_context_manager as scope:
self._current_scope = scope
try:
call_has_scope_arg = self._call_has_scope_arg
except AttributeError:
self._call_fn_args = variable_scope_shim.fn_args(self.call)
self._call_has_scope_arg = 'scope' in self._call_fn_args
call_has_scope_arg = self._call_has_scope_arg
if call_has_scope_arg:
kwargs['scope'] = scope
# Actually call layer
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
if not tf.executing_eagerly():
# Update global default collections.
_add_elements_to_collection(self.updates, tf.compat.v1.GraphKeys.UPDATE_OPS)
return outputs
def __deepcopy__(self, memo):
no_copy = set(['_graph', '_thread_local', '_metrics_lock'])
shallow_copy = set(['_scope', '_always_reuse_variable_scope'])
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k in no_copy:
setattr(result, k, v)
elif k in shallow_copy:
setattr(result, k, copy.copy(v))
elif base_layer.is_tensor_or_tensor_list(v):
setattr(result, k, v)
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
def __setattr__(self, value, name):
# By-pass the automatic dependency tracking performed by the parent Layer.
super(tf.__internal__.tracking.Trackable, self).__setattr__(value, name) # pylint: disable=bad-super-call
@property
def _is_legacy_layer(self):
"""Used by keras to check compatibility. This should not be overridden."""
return True
def _add_elements_to_collection(elements, collection_list):
if tf.executing_eagerly():
raise RuntimeError('Using collections from Layers not supported in Eager '
'mode. Tried to add %s to %s' % (elements,
collection_list))
elements = tf.nest.flatten(elements)
collection_list = tf.nest.flatten(collection_list)
for name in collection_list:
collection = tf.compat.v1.get_collection_ref(name)
collection_set = {id(e) for e in collection}
for element in elements:
if id(element) not in collection_set:
collection.append(element)
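# --- Illustrative sketch (independent of Keras) -----------------------------
# Layer.__deepcopy__ above partitions attributes into three buckets: shared
# by reference (locks, graphs), shallow-copied (variable scopes), and fully
# deep-copied (everything else). The same pattern as a generic helper, with
# the bucket contents supplied by a hypothetical caller:
def _example_selective_deepcopy(obj, memo, no_copy=(), shallow_copy=()):
  cls = obj.__class__
  result = cls.__new__(cls)
  memo[id(obj)] = result
  for name, value in obj.__dict__.items():
    if name in no_copy:           # share by reference (e.g. locks)
      setattr(result, name, value)
    elif name in shallow_copy:    # copy the container, share its contents
      setattr(result, name, copy.copy(value))
    else:                         # fully independent copy
      setattr(result, name, copy.deepcopy(value, memo))
  return result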
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import copy
import functools
import six
from stevedore import extension
from heat.common import exception
from heat.db import api as db_api
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
__all__ = ['Template']
_template_classes = None
def get_version(template_data, available_versions):
version_keys = set(key for key, version in available_versions)
candidate_keys = set(k for k, v in six.iteritems(template_data) if
isinstance(v, basestring))
keys_present = version_keys & candidate_keys
if len(keys_present) > 1:
explanation = _('Ambiguous versions (%s)') % ', '.join(keys_present)
raise exception.InvalidTemplateVersion(explanation=explanation)
try:
version_key = keys_present.pop()
except KeyError:
explanation = _('Template version was not provided')
raise exception.InvalidTemplateVersion(explanation=explanation)
return version_key, template_data[version_key]
def _get_template_extension_manager():
return extension.ExtensionManager(
namespace='heat.templates',
invoke_on_load=False,
verify_requirements=False,
on_load_failure_callback=raise_extension_exception)
def raise_extension_exception(extmanager, ep, err):
raise TemplatePluginNotRegistered(name=ep.name, error=six.text_type(err))
class TemplatePluginNotRegistered(exception.HeatException):
msg_fmt = _("Could not load %(name)s: %(error)s")
def get_template_class(template_data):
global _template_classes
if _template_classes is None:
mgr = _get_template_extension_manager()
_template_classes = dict((tuple(name.split('.')), mgr[name].plugin)
for name in mgr.names())
available_versions = _template_classes.keys()
version = get_version(template_data, available_versions)
version_type = version[0]
try:
return _template_classes[version]
except KeyError:
av_list = [v for k, v in available_versions if k == version_type]
msg_data = {'version': ': '.join(version),
'version_type': version_type,
'available': ', '.join(v for v in av_list)}
if len(av_list) > 1:
explanation = _('"%(version)s". "%(version_type)s" '
'should be one of: %(available)s') % msg_data
else:
explanation = _('"%(version)s". "%(version_type)s" '
'should be: %(available)s') % msg_data
raise exception.InvalidTemplateVersion(explanation=explanation)
class Template(collections.Mapping):
'''A stack template.'''
def __new__(cls, template, *args, **kwargs):
'''Create a new Template of the appropriate class.'''
if cls != Template:
TemplateClass = cls
else:
TemplateClass = get_template_class(template)
return super(Template, cls).__new__(TemplateClass)
def __init__(self, template, template_id=None, files=None):
'''
Initialise the template with a JSON object and a set of Parameters
'''
self.id = template_id
self.t = template
self.files = files or {}
self.maps = self[self.MAPPINGS]
self.version = get_version(self.t, _template_classes.keys())
def __deepcopy__(self, memo):
return Template(copy.deepcopy(self.t, memo), files=self.files)
@classmethod
def load(cls, context, template_id, t=None):
'''Retrieve a Template with the given ID from the database.'''
if t is None:
t = db_api.raw_template_get(context, template_id)
return cls(t.template, template_id=template_id, files=t.files)
def store(self, context=None):
'''Store the Template in the database and return its ID.'''
rt = {
'template': self.t,
'files': self.files
}
if self.id is None:
new_rt = db_api.raw_template_create(context, rt)
self.id = new_rt.id
else:
db_api.raw_template_update(context, self.id, rt)
return self.id
def __iter__(self):
'''Return an iterator over the section names.'''
return (s for s in self.SECTIONS
if s not in self.SECTIONS_NO_DIRECT_ACCESS)
def __len__(self):
'''Return the number of sections.'''
return len(self.SECTIONS) - len(self.SECTIONS_NO_DIRECT_ACCESS)
@abc.abstractmethod
def param_schemata(self):
'''Return a dict of parameters.Schema objects for the parameters.'''
pass
@abc.abstractmethod
def parameters(self, stack_identifier, user_params):
'''Return a parameters.Parameters object for the stack.'''
pass
@abc.abstractmethod
def resource_definitions(self, stack):
'''Return a dictionary of ResourceDefinition objects.'''
pass
@abc.abstractmethod
def add_resource(self, definition, name=None):
'''Add a resource to the template.
The resource is passed as a ResourceDefinition object. If no name is
specified, the name from the ResourceDefinition should be used.
'''
pass
def remove_resource(self, name):
'''Remove a resource from the template.'''
self.t.get(self.RESOURCES, {}).pop(name)
def parse(self, stack, snippet):
return parse(self.functions, stack, snippet)
def validate(self):
'''Validate the template.
Validates the top-level sections of the template as well as syntax
inside select sections. Some sections are not checked here but in
code parts that are responsible for working with the respective
        sections (e.g. parameters are checked by the parameters schema class).
'''
# check top-level sections
for k in self.t.keys():
if k not in self.SECTIONS:
raise exception.InvalidTemplateSection(section=k)
# check resources
for res in self[self.RESOURCES].values():
try:
if not res.get('Type'):
message = _('Every Resource object must '
'contain a Type member.')
raise exception.StackValidationFailed(message=message)
except AttributeError:
type_res = type(res)
if isinstance(res, unicode):
type_res = "string"
message = _('Resources must contain Resource. '
'Found a [%s] instead') % type_res
raise exception.StackValidationFailed(message=message)
def parse(functions, stack, snippet):
recurse = functools.partial(parse, functions, stack)
if isinstance(snippet, collections.Mapping):
if len(snippet) == 1:
fn_name, args = next(six.iteritems(snippet))
Func = functions.get(fn_name)
if Func is not None:
return Func(stack, fn_name, recurse(args))
return dict((k, recurse(v)) for k, v in six.iteritems(snippet))
elif (not isinstance(snippet, basestring) and
isinstance(snippet, collections.Iterable)):
return [recurse(v) for v in snippet]
else:
return snippet
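# --- Illustrative sketch (independent of Heat) ------------------------------
# Template.__new__ above dispatches construction to a concrete subclass based
# on the data being wrapped, so callers always write Template(tmpl) and get
# the right plugin class back. The bare pattern, with a hypothetical registry
# keyed on a 'version' field:
class _ExampleDoc(object):
    _registry = {}

    def __new__(cls, data, *args, **kwargs):
        if cls is _ExampleDoc:
            # Pick the concrete class before __init__ ever runs
            cls = cls._registry[data['version']]
        return super(_ExampleDoc, cls).__new__(cls)

    def __init__(self, data):
        self.data = data


class _ExampleDocV1(_ExampleDoc):
    pass


_ExampleDoc._registry['v1'] = _ExampleDocV1
# _ExampleDoc({'version': 'v1'}) now returns an _ExampleDocV1 instance.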
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""External user permission utilities."""
from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import fuzz_target_utils
from clusterfuzz._internal.datastore import ndb_utils
MEMCACHE_TTL_IN_SECONDS = 15 * 60
def _fuzzers_for_job(job_type, include_parents):
"""Return all fuzzers that have the job associated.
Args:
job_type: The job type.
include_parents: Include the parent fuzzer.
Returns:
A list of fuzzer names.
"""
fuzzers = []
engine_fuzzers = data_handler.get_fuzzing_engines()
for fuzzer in data_types.Fuzzer.query(data_types.Fuzzer.jobs == job_type):
# Add this if we're including all parents or this is not an engine fuzzer
# with fuzz targets.
if include_parents or fuzzer.name not in engine_fuzzers:
fuzzers.append(fuzzer.name)
for target_job in fuzz_target_utils.get_fuzz_target_jobs(job=job_type):
fuzzers.append(target_job.fuzz_target_name)
return sorted(fuzzers)
def _expand_prefix(all_names, prefix):
"""Expand the given prefix into real entity names.
Args:
all_names: A list of all entity names.
prefix: A prefix string.
Returns:
A list of entity names that the pattern expands to.
"""
return [name for name in all_names if name.startswith(prefix)]
def _get_permissions_query_for_user(user_email, entity_kind=None):
"""Get a permissions query for a given user.
Args:
user_email: The email of the user.
entity_kind: The type (data_types.PermissionEntityKind) of the permission to
filter by, or None.
Returns:
A ndb.Query giving the permissions for the given parameters.
"""
permissions_for_user = data_types.ExternalUserPermission.query(
data_types.ExternalUserPermission.email == utils.normalize_email(
user_email))
if entity_kind is not None:
permissions_for_user = permissions_for_user.filter(
data_types.ExternalUserPermission.entity_kind == entity_kind)
return permissions_for_user
def _allowed_entities_for_user(user_email, entity_kind):
"""Return the entity names that the given user can access.
Args:
user_email: The email of the user.
entity_kind: The type (data_types.PermissionEntityKind) of the entity.
Returns:
A list of entity names that the user has access to.
"""
if not user_email:
return []
allowed = []
permissions = _get_permissions_query_for_user(user_email, entity_kind)
if entity_kind == data_types.PermissionEntityKind.FUZZER:
all_names = data_handler.get_all_fuzzer_names_including_children()
else:
all_names = data_handler.get_all_job_type_names()
for permission in permissions:
if permission.is_prefix:
allowed.extend(_expand_prefix(all_names, permission.entity_name))
elif permission.entity_name in all_names:
allowed.append(permission.entity_name)
return sorted(allowed)
def _is_entity_allowed_for_user(user_email, name, entity_kind):
"""Return whether if the given user has access to the entity.
Args:
user_email: The email of the user.
name: The name of the entity.
entity_kind: The type of the entity.
Returns:
A bool indicating whether the given user has access to the entity.
"""
if not user_email or not name:
return False
permissions = _get_permissions_query_for_user(user_email, entity_kind)
for permission in permissions:
if permission.is_prefix:
if name.startswith(permission.entity_name):
return True
elif permission.entity_name == name:
return True
return False
def _allowed_users_for_entity(name, entity_kind, auto_cc=None):
"""Return a list of users that have permissions for the given entity.
Args:
name: The name of the entity.
entity_kind: The type (data_types.PermissionEntityKind) of the entity.
auto_cc: The Auto CC type (data_types.AutoCCType) to filter on, or None.
Returns:
A list of user emails that have permission to access the given entity.
"""
if not name:
return []
# Easy case: direct matches.
direct_match_permissions = data_types.ExternalUserPermission.query(
data_types.ExternalUserPermission.entity_kind == entity_kind,
data_types.ExternalUserPermission.entity_name == name,
ndb_utils.is_false(data_types.ExternalUserPermission.is_prefix),
projection=[data_types.ExternalUserPermission.email])
if auto_cc is not None:
direct_match_permissions = direct_match_permissions.filter(
data_types.ExternalUserPermission.auto_cc == auto_cc)
allowed_users = [permission.email for permission in direct_match_permissions]
# Find all permissions where the prefix matches the fuzzer_name.
# Unfortunately, Datastore doesn't give us an easy way of doing so. To iterate
# through a smaller set than every single permission, get all permissions that
  # contain a prefix string <= the actual fuzzer name and >= the first
# character.
prefix_match_permissions = data_types.ExternalUserPermission.query(
data_types.ExternalUserPermission.entity_kind == entity_kind,
data_types.ExternalUserPermission.entity_name <= name,
data_types.ExternalUserPermission.entity_name >= name[0],
ndb_utils.is_true(data_types.ExternalUserPermission.is_prefix),
projection=[
data_types.ExternalUserPermission.email,
data_types.ExternalUserPermission.entity_name
])
if auto_cc is not None:
prefix_match_permissions = prefix_match_permissions.filter(
data_types.ExternalUserPermission.auto_cc == auto_cc)
for permission in prefix_match_permissions:
if not permission.entity_name:
# No external user should have an empty prefix (access to all
# fuzzers/jobs).
continue
if name.startswith(permission.entity_name):
allowed_users.append(permission.email)
return sorted(allowed_users)
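# Illustrative sketch (not part of the original module) of the range-bounded
# prefix lookup used above: since Datastore cannot query "entity_name is a
# prefix of X" directly, the query bounds the scan with
# entity_name >= name[0] and entity_name <= name, then each candidate is
# confirmed with startswith(). The candidate prefixes below are hypothetical.
def _example_prefix_bounding(name='libFuzzer_foo'):
  candidate_prefixes = ['l', 'lib', 'libFuzzer_', 'libFuzzer_foo', 'zzz']
  # 'zzz' falls outside the query bounds; the rest are confirmed in Python.
  in_bounds = [p for p in candidate_prefixes if name[0] <= p <= name]
  return [p for p in in_bounds if name.startswith(p)]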
def _cc_users_for_entity(name, entity_type, security_flag):
"""Return CC users for entity."""
users = _allowed_users_for_entity(name, entity_type,
data_types.AutoCCType.ALL)
if security_flag:
users.extend(
_allowed_users_for_entity(name, entity_type,
data_types.AutoCCType.SECURITY))
return sorted(users)
@memoize.wrap(memoize.Memcache(MEMCACHE_TTL_IN_SECONDS))
def allowed_fuzzers_for_user(user_email,
include_from_jobs=False,
include_parents=False):
"""Return allowed fuzzers for the given user.
Args:
user_email: The email of the user.
include_from_jobs: Include all fuzzers for the allowed jobs of the user.
include_parents: Include parent fuzzers when there is no explicit permission
for the parent fuzzer, but there are permissions for its children as a
result of the user's job permissions. Only applies when
include_from_jobs is set.
Returns:
A list of fuzzer names that this user is allowed to view information about.
"""
allowed_fuzzers = _allowed_entities_for_user(
user_email, data_types.PermissionEntityKind.FUZZER)
if include_from_jobs:
allowed_jobs = allowed_jobs_for_user(user_email)
for allowed_job in allowed_jobs:
allowed_fuzzers.extend(_fuzzers_for_job(allowed_job, include_parents))
allowed_fuzzers = list(set(allowed_fuzzers))
return sorted(allowed_fuzzers)
@memoize.wrap(memoize.Memcache(MEMCACHE_TTL_IN_SECONDS))
def allowed_jobs_for_user(user_email):
"""Return allowed jobs for the given user.
Args:
user_email: The email of the user.
Returns:
A list of job names that this user is allowed to view information about.
"""
return _allowed_entities_for_user(user_email,
data_types.PermissionEntityKind.JOB)
def allowed_users_for_fuzzer(fuzzer_name):
"""Return allowed external users for the given fuzzer.
Args:
fuzzer_name: The name of the fuzzer.
Returns:
A list of user emails that are allowed to view information relating to this
fuzzer.
"""
# TODO(ochang): Once we support jobs, take that into account.
return _allowed_users_for_entity(fuzzer_name,
data_types.PermissionEntityKind.FUZZER)
def cc_users_for_fuzzer(fuzzer_name, security_flag):
"""Return external users that should be CC'ed according to the given rule.
Args:
fuzzer_name: The name of the fuzzer.
security_flag: Whether or not the CC is for a security issue.
Returns:
A list of user emails that should be CC'ed.
"""
return _cc_users_for_entity(
fuzzer_name, data_types.PermissionEntityKind.FUZZER, security_flag)
def is_fuzzer_allowed_for_user(user_email, fuzzer_name,
include_from_jobs=False):
"""Return whether if the given user has access to the fuzzer.
Args:
user_email: The email of the user.
fuzzer_name: The name of the fuzzer.
include_from_jobs: Include all fuzzers for the allowed jobs of the user.
Returns:
A bool indicating whether the given user has access to the fuzzer.
"""
is_allowed = _is_entity_allowed_for_user(
user_email, fuzzer_name, data_types.PermissionEntityKind.FUZZER)
if not is_allowed and include_from_jobs:
is_allowed = fuzzer_name in allowed_fuzzers_for_user(
user_email, include_from_jobs=True)
return is_allowed
def is_job_allowed_for_user(user_email, job_type):
"""Return whether if the given user has access to the job.
Args:
user_email: The email of the user.
job_type: The name of the job.
Returns:
A bool indicating whether the given user has access to the job.
"""
return _is_entity_allowed_for_user(user_email, job_type,
data_types.PermissionEntityKind.JOB)
def is_upload_allowed_for_user(user_email):
"""Return whether if the given user has upload permissions.
Args:
user_email: The email of the user.
Returns:
A bool indicating whether the given user has upload permissions.
"""
permissions = _get_permissions_query_for_user(
user_email, data_types.PermissionEntityKind.UPLOADER)
return bool(permissions.get())
def cc_users_for_job(job_type, security_flag):
"""Return external users that should be CC'ed according to the given rule.
Args:
job_type: The name of the job
security_flag: Whether or not the CC is for a security issue.
Returns:
A list of user emails that should be CC'ed.
"""
return _cc_users_for_entity(job_type, data_types.PermissionEntityKind.JOB,
security_flag)
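# Illustrative usage sketch (not part of the original module). The email and
# entity names below are hypothetical and assume matching
# ExternalUserPermission records already exist in the datastore, and that
# memcache is available for the memoized helpers.
def _example_permission_lookups():
  email = '[email protected]'
  return {
      'fuzzers': allowed_fuzzers_for_user(email, include_from_jobs=True),
      'jobs': allowed_jobs_for_user(email),
      'can_upload': is_upload_allowed_for_user(email),
      'cc_for_job': cc_users_for_job('libfuzzer_asan_example',
                                     security_flag=True),
  }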
|
|
import time
import pprint
import sqlalchemy
from testify import *
from dynochemy import db, Table, Solvent, View
from dynochemy import sql
from dynochemy import operation
class TestTable(Table):
name = "test"
hash_key = 'key'
class FullTestTable(Table):
name = "full_test"
hash_key = 'key'
range_key = 'range_key'
class SolventTestCase(TestCase):
@setup
def build_db(self):
engine = sqlalchemy.create_engine("sqlite://")
self.db = sql.SQLDB(engine)
self.db.register(TestTable)
self.db.register(FullTestTable)
class SimpleSolventTestCase(SolventTestCase):
def test(self):
solvent = Solvent()
put_op = solvent.put(TestTable, {'key': 'hello', 'value': 10.0})
result = solvent.run(self.db)
ret = result[put_op]
assert self.db.TestTable.get('hello')['value'] == 10
class GetSolventTestCase(SolventTestCase):
@setup
def build_entity(self):
self.db.TestTable.put({'key': 'hello', 'value': 25})
def test(self):
solvent = Solvent()
get_op = solvent.get(TestTable, 'hello')
result = solvent.run(self.db)
ret = result[get_op]
assert_equal(ret['value'], 25)
class SolventCapacityTestCase(TestCase):
@setup
def build_db(self):
engine = sqlalchemy.create_engine("sqlite://")
self.db = sql.SQLDB(engine)
@setup
def build_new_table(self):
class LimitedTestTable(Table):
name = "test"
hash_key = 'key'
write_capacity = 1.0
read_capacity = 1.0
self.db.register(LimitedTestTable)
self.LimitedTestTable = LimitedTestTable
def test(self):
solvent = Solvent()
put_op_1 = solvent.put(self.LimitedTestTable, {'key': 'hello', 'value': 25})
put_op_2 = solvent.put(self.LimitedTestTable, {'key': 'world', 'value': 100})
put_op_3 = solvent.put(self.LimitedTestTable, {'key': 'you', 'value': 0})
result = solvent.run(self.db)
for op in [put_op_1, put_op_2, put_op_3]:
result[op]
solvent = Solvent()
get_op_1 = solvent.get(self.LimitedTestTable, 'hello')
get_op_2 = solvent.get(self.LimitedTestTable, 'world')
get_op_3 = solvent.get(self.LimitedTestTable, 'you')
result = solvent.run(self.db)
for op in [get_op_1, get_op_2, get_op_3]:
entity = result[op]
assert entity['key']
class SolventSequenceTestCase(SolventTestCase):
@setup
def build_by_two_op(self):
class ByTwoOperation(operation.UpdateOperation):
def have_result(self, op_results, op_cb):
super(ByTwoOperation, self).have_result(op_results, op_cb)
# return an equivalent update operation
op_results.next_ops.append(operation.UpdateOperation(self.table, self.key, add=self.add))
self.op = ByTwoOperation(self.db.TestTable.__class__, 'rhettg', add={'value': 1})
def test(self):
solvent = Solvent()
solvent.add_operation(self.op)
solvent.run(self.db)
for res in self.db.TestTable.scan()():
assert_equal(res['value'], 2)
class SolventViewViewTestCase(SolventTestCase):
"""Verify that our view created operations go through views also"""
@setup
def build_views(self):
class TestView(View):
table = TestTable
view_table = TestTable
@classmethod
def add(cls, op, result):
entity = op.entity
if entity['key'] == 'A':
return [operation.PutOperation(cls.view_table, {'key': 'B'})]
if entity['key'] == 'B':
return [operation.PutOperation(cls.view_table, {'key': 'C'})]
return []
self.TestView = TestView
self.db.register(TestView)
def test(self):
solvent = Solvent()
solvent.put(TestTable, {'key': 'A'})
solvent.run(self.db)
ndx = 0
for res in self.db.TestTable.scan()():
ndx += 1
#pprint.pprint(res)
assert_equal(ndx, 3)
class SolventViewTestCase(SolventTestCase):
@setup
def build_view(self):
class ViewTable(Table):
name = "view_table"
hash_key = 'value'
self.ViewTable = ViewTable
self.db.register(ViewTable)
class TestView(View):
table = TestTable
view_table = ViewTable
@classmethod
def add(cls, op, result):
entity = op.entity
return [operation.UpdateOperation(cls.view_table, entity['value'], {'count': 1})]
@classmethod
def remove(cls, op, result):
return [operation.UpdateOperation(cls.view_table, result['value'], {'count': -1})]
self.TestView = TestView
self.db.register(TestView)
def test(self):
s = Solvent()
s.put(TestTable, {'key': '1', 'value': 'blue'})
s.put(TestTable, {'key': '2', 'value': 'green'})
s.put(TestTable, {'key': '3', 'value': 'blue'})
s.run(self.db)
assert_equal(self.db.ViewTable['green']['count'], 1)
assert_equal(self.db.ViewTable['blue']['count'], 2)
s = Solvent()
s.delete(TestTable, '1')
s.delete(TestTable, '2')
s.run(self.db)
assert_equal(self.db.ViewTable['green']['count'], 0)
assert_equal(self.db.ViewTable['blue']['count'], 1)
class SolventQueryTestCase(SolventTestCase):
@setup
def build_entities(self):
self.keys = []
for ndx in range(4):
entity = {'key': 'my_key', 'range_key': ndx}
self.db.FullTestTable.put(entity)
@setup
def change_query_limit(self):
self._old_limit = sql.DEFAULT_LIMIT
sql.DEFAULT_LIMIT = 2
@teardown
def restore_query_limit(self):
sql.DEFAULT_LIMIT = self._old_limit
def test(self):
s = Solvent()
q_op = s.query(FullTestTable, 'my_key')
q_op.range(0, 2)
q_op.limit(20)
result = s.run(self.db)
query_result = result[q_op]
entities = list(query_result)
assert_equal(len(entities), 3)
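# Illustrative sketch (not part of the test module): the basic Solvent
# batching pattern the cases above exercise, written without the testify
# harness. It reuses the in-memory sqlite backend and TestTable fixture
# defined at the top of this file.
def _example_solvent_roundtrip():
    engine = sqlalchemy.create_engine("sqlite://")
    test_db = sql.SQLDB(engine)
    test_db.register(TestTable)
    # One solvent batches the write, a second one batches the read.
    put_solvent = Solvent()
    put_solvent.put(TestTable, {'key': 'hello', 'value': 10.0})
    put_solvent.run(test_db)
    get_solvent = Solvent()
    get_op = get_solvent.get(TestTable, 'hello')
    result = get_solvent.run(test_db)
    return result[get_op]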
|
|
# implement samba_tool drs commands
#
# Copyright Andrew Tridgell 2010
#
# based on C implementation by Kamen Mazdrashki <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba.getopt as options
import ldb
from samba.auth import system_session
from samba.netcmd import (
Command,
CommandError,
Option,
SuperCommand,
)
from samba.samdb import SamDB
from samba import drs_utils, nttime2string, dsdb
from samba.dcerpc import drsuapi, misc
import common
def drsuapi_connect(ctx):
'''make a DRSUAPI connection to the server'''
try:
(ctx.drsuapi, ctx.drsuapi_handle, ctx.bind_supported_extensions) = drs_utils.drsuapi_connect(ctx.server, ctx.lp, ctx.creds)
except Exception, e:
raise CommandError("DRS connection to %s failed" % ctx.server, e)
def samdb_connect(ctx):
'''make a ldap connection to the server'''
try:
ctx.samdb = SamDB(url="ldap://%s" % ctx.server,
session_info=system_session(),
credentials=ctx.creds, lp=ctx.lp)
except Exception, e:
raise CommandError("LDAP connection to %s failed" % ctx.server, e)
def drs_errmsg(werr):
'''return "was successful" or an error string'''
(ecode, estring) = werr
if ecode == 0:
return "was successful"
return "failed, result %u (%s)" % (ecode, estring)
def attr_default(msg, attrname, default):
'''get an attribute from a ldap msg with a default'''
if attrname in msg:
return msg[attrname][0]
return default
def drs_parse_ntds_dn(ntds_dn):
'''parse a NTDS DN returning a site and server'''
a = ntds_dn.split(',')
if a[0] != "CN=NTDS Settings" or a[2] != "CN=Servers" or a[4] != 'CN=Sites':
raise RuntimeError("bad NTDS DN %s" % ntds_dn)
server = a[1].split('=')[1]
site = a[3].split('=')[1]
return (site, server)
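# Illustrative sketch (not part of the original command code): the DN shape
# drs_parse_ntds_dn expects and what it returns. The DN below is a
# hypothetical example following the usual NTDS Settings layout.
def _example_parse_ntds_dn():
    example_dn = ("CN=NTDS Settings,CN=DC1,CN=Servers,"
                  "CN=Default-First-Site-Name,CN=Sites,"
                  "CN=Configuration,DC=samdom,DC=example,DC=com")
    # Returns ('Default-First-Site-Name', 'DC1').
    return drs_parse_ntds_dn(example_dn)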
class cmd_drs_showrepl(Command):
"""Show replication status."""
synopsis = "%prog [<DC>] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ["DC?"]
def print_neighbour(self, n):
'''print one set of neighbour information'''
self.message("%s" % n.naming_context_dn)
try:
(site, server) = drs_parse_ntds_dn(n.source_dsa_obj_dn)
self.message("\t%s\%s via RPC" % (site, server))
except RuntimeError:
self.message("\tNTDS DN: %s" % n.source_dsa_obj_dn)
self.message("\t\tDSA object GUID: %s" % n.source_dsa_obj_guid)
self.message("\t\tLast attempt @ %s %s" % (nttime2string(n.last_attempt),
drs_errmsg(n.result_last_attempt)))
self.message("\t\t%u consecutive failure(s)." % n.consecutive_sync_failures)
self.message("\t\tLast success @ %s" % nttime2string(n.last_success))
self.message("")
def drsuapi_ReplicaInfo(ctx, info_type):
'''call a DsReplicaInfo'''
req1 = drsuapi.DsReplicaGetInfoRequest1()
req1.info_type = info_type
try:
(info_type, info) = ctx.drsuapi.DsReplicaGetInfo(ctx.drsuapi_handle, 1, req1)
except Exception, e:
raise CommandError("DsReplicaGetInfo of type %u failed" % info_type, e)
return (info_type, info)
def run(self, DC=None, sambaopts=None,
credopts=None, versionopts=None, server=None):
self.lp = sambaopts.get_loadparm()
if DC is None:
DC = common.netcmd_dnsname(self.lp)
self.server = DC
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
drsuapi_connect(self)
samdb_connect(self)
# show domain information
ntds_dn = self.samdb.get_dsServiceName()
server_dns = self.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["dnsHostName"])[0]['dnsHostName'][0]
(site, server) = drs_parse_ntds_dn(ntds_dn)
try:
ntds = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=['options', 'objectGUID', 'invocationId'])
except Exception, e:
raise CommandError("Failed to search NTDS DN %s" % ntds_dn)
conn = self.samdb.search(base=ntds_dn, expression="(objectClass=nTDSConnection)")
self.message("%s\\%s" % (site, server))
self.message("DSA Options: 0x%08x" % int(attr_default(ntds[0], "options", 0)))
self.message("DSA object GUID: %s" % self.samdb.schema_format_value("objectGUID", ntds[0]["objectGUID"][0]))
self.message("DSA invocationId: %s\n" % self.samdb.schema_format_value("objectGUID", ntds[0]["invocationId"][0]))
self.message("==== INBOUND NEIGHBORS ====\n")
(info_type, info) = self.drsuapi_ReplicaInfo(drsuapi.DRSUAPI_DS_REPLICA_INFO_NEIGHBORS)
for n in info.array:
self.print_neighbour(n)
self.message("==== OUTBOUND NEIGHBORS ====\n")
(info_type, info) = self.drsuapi_ReplicaInfo(drsuapi.DRSUAPI_DS_REPLICA_INFO_REPSTO)
for n in info.array:
self.print_neighbour(n)
reasons = ['NTDSCONN_KCC_GC_TOPOLOGY',
'NTDSCONN_KCC_RING_TOPOLOGY',
'NTDSCONN_KCC_MINIMIZE_HOPS_TOPOLOGY',
'NTDSCONN_KCC_STALE_SERVERS_TOPOLOGY',
'NTDSCONN_KCC_OSCILLATING_CONNECTION_TOPOLOGY',
'NTDSCONN_KCC_INTERSITE_GC_TOPOLOGY',
'NTDSCONN_KCC_INTERSITE_TOPOLOGY',
'NTDSCONN_KCC_SERVER_FAILOVER_TOPOLOGY',
'NTDSCONN_KCC_SITE_FAILOVER_TOPOLOGY',
'NTDSCONN_KCC_REDUNDANT_SERVER_TOPOLOGY']
self.message("==== KCC CONNECTION OBJECTS ====\n")
for c in conn:
c_rdn, sep, c_server_dn = c['fromServer'][0].partition(',')
c_server_res = self.samdb.search(base=c_server_dn, scope=ldb.SCOPE_BASE, attrs=["dnsHostName"])
c_server_dns = c_server_res[0]["dnsHostName"][0]
self.message("Connection --")
self.message("\tConnection name: %s" % c['name'][0])
self.message("\tEnabled : %s" % attr_default(c, 'enabledConnection', 'TRUE'))
self.message("\tServer DNS name : %s" % c_server_dns)
self.message("\tServer DN name : %s" % c['fromServer'][0])
self.message("\t\tTransportType: RPC")
self.message("\t\toptions: 0x%08X" % int(attr_default(c, 'options', 0)))
if 'mS-DS-ReplicatesNCReason' not in c:
self.message("Warning: No NC replicated for Connection!")
continue
for r in c['mS-DS-ReplicatesNCReason']:
a = str(r).split(':')
self.message("\t\tReplicatesNC: %s" % a[3])
self.message("\t\tReason: 0x%08x" % int(a[2]))
for s in reasons:
if getattr(dsdb, s, 0) & int(a[2]):
self.message("\t\t\t%s" % s)
class cmd_drs_kcc(Command):
"""Trigger knowledge consistency center run."""
synopsis = "%prog [<DC>] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ["DC?"]
def run(self, DC=None, sambaopts=None,
credopts=None, versionopts=None, server=None):
self.lp = sambaopts.get_loadparm()
if DC is None:
DC = common.netcmd_dnsname(self.lp)
self.server = DC
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
drsuapi_connect(self)
req1 = drsuapi.DsExecuteKCC1()
try:
self.drsuapi.DsExecuteKCC(self.drsuapi_handle, 1, req1)
except Exception, e:
raise CommandError("DsExecuteKCC failed", e)
self.message("Consistency check on %s successful." % DC)
def drs_local_replicate(self, SOURCE_DC, NC):
'''replicate from a source DC to the local SAM'''
self.server = SOURCE_DC
drsuapi_connect(self)
self.local_samdb = SamDB(session_info=system_session(), url=None,
credentials=self.creds, lp=self.lp)
self.samdb = SamDB(url="ldap://%s" % self.server,
session_info=system_session(),
credentials=self.creds, lp=self.lp)
# work out the source and destination GUIDs
res = self.local_samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["dsServiceName"])
self.ntds_dn = res[0]["dsServiceName"][0]
res = self.local_samdb.search(base=self.ntds_dn, scope=ldb.SCOPE_BASE, attrs=["objectGUID"])
self.ntds_guid = misc.GUID(self.samdb.schema_format_value("objectGUID", res[0]["objectGUID"][0]))
source_dsa_invocation_id = misc.GUID(self.samdb.get_invocation_id())
destination_dsa_guid = self.ntds_guid
self.samdb.transaction_start()
repl = drs_utils.drs_Replicate("ncacn_ip_tcp:%s[seal]" % self.server, self.lp,
self.creds, self.local_samdb)
try:
repl.replicate(NC, source_dsa_invocation_id, destination_dsa_guid)
except Exception, e:
raise CommandError("Error replicating DN %s" % NC, e)
self.samdb.transaction_commit()
class cmd_drs_replicate(Command):
"""Replicate a naming context between two DCs."""
synopsis = "%prog <destinationDC> <sourceDC> <NC> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ["DEST_DC", "SOURCE_DC", "NC"]
takes_options = [
Option("--add-ref", help="use ADD_REF to add to repsTo on source", action="store_true"),
Option("--sync-forced", help="use SYNC_FORCED to force inbound replication", action="store_true"),
Option("--sync-all", help="use SYNC_ALL to replicate from all DCs", action="store_true"),
Option("--full-sync", help="resync all objects", action="store_true"),
Option("--local", help="pull changes directly into the local database (destination DC is ignored)", action="store_true"),
]
def run(self, DEST_DC, SOURCE_DC, NC,
add_ref=False, sync_forced=False, sync_all=False, full_sync=False,
local=False, sambaopts=None, credopts=None, versionopts=None, server=None):
self.server = DEST_DC
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
if local:
drs_local_replicate(self, SOURCE_DC, NC)
return
drsuapi_connect(self)
samdb_connect(self)
# we need to find the NTDS GUID of the source DC
msg = self.samdb.search(base=self.samdb.get_config_basedn(),
expression="(&(objectCategory=server)(|(name=%s)(dNSHostName=%s)))" % (
ldb.binary_encode(SOURCE_DC),
ldb.binary_encode(SOURCE_DC)),
attrs=[])
if len(msg) == 0:
raise CommandError("Failed to find source DC %s" % SOURCE_DC)
server_dn = msg[0]['dn']
msg = self.samdb.search(base=server_dn, scope=ldb.SCOPE_ONELEVEL,
expression="(|(objectCategory=nTDSDSA)(objectCategory=nTDSDSARO))",
attrs=['objectGUID', 'options'])
if len(msg) == 0:
raise CommandError("Failed to find source NTDS DN %s" % SOURCE_DC)
source_dsa_guid = msg[0]['objectGUID'][0]
dsa_options = int(attr_default(msg, 'options', 0))
req_options = 0
if not (dsa_options & dsdb.DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL):
req_options |= drsuapi.DRSUAPI_DRS_WRIT_REP
if add_ref:
req_options |= drsuapi.DRSUAPI_DRS_ADD_REF
if sync_forced:
req_options |= drsuapi.DRSUAPI_DRS_SYNC_FORCED
if sync_all:
req_options |= drsuapi.DRSUAPI_DRS_SYNC_ALL
if full_sync:
req_options |= drsuapi.DRSUAPI_DRS_FULL_SYNC_NOW
try:
drs_utils.sendDsReplicaSync(self.drsuapi, self.drsuapi_handle, source_dsa_guid, NC, req_options)
except drs_utils.drsException, estr:
raise CommandError("DsReplicaSync failed", estr)
self.message("Replicate from %s to %s was successful." % (SOURCE_DC, DEST_DC))
class cmd_drs_bind(Command):
"""Show DRS capabilities of a server."""
synopsis = "%prog [<DC>] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ["DC?"]
def run(self, DC=None, sambaopts=None,
credopts=None, versionopts=None, server=None):
self.lp = sambaopts.get_loadparm()
if DC is None:
DC = common.netcmd_dnsname(self.lp)
self.server = DC
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
drsuapi_connect(self)
samdb_connect(self)
bind_info = drsuapi.DsBindInfoCtr()
bind_info.length = 28
bind_info.info = drsuapi.DsBindInfo28()
(info, handle) = self.drsuapi.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info)
optmap = [
("DRSUAPI_SUPPORTED_EXTENSION_BASE", "DRS_EXT_BASE"),
("DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION", "DRS_EXT_ASYNCREPL"),
("DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI", "DRS_EXT_REMOVEAPI"),
("DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2", "DRS_EXT_MOVEREQ_V2"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS", "DRS_EXT_GETCHG_DEFLATE"),
("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1", "DRS_EXT_DCINFO_V1"),
("DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION", "DRS_EXT_RESTORE_USN_OPTIMIZATION"),
("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY", "DRS_EXT_ADDENTRY"),
("DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE", "DRS_EXT_KCC_EXECUTE"),
("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2", "DRS_EXT_ADDENTRY_V2"),
("DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION", "DRS_EXT_LINKED_VALUE_REPLICATION"),
("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2", "DRS_EXT_DCINFO_V2"),
("DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD","DRS_EXT_INSTANCE_TYPE_NOT_REQ_ON_MOD"),
("DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND", "DRS_EXT_CRYPTO_BIND"),
("DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO", "DRS_EXT_GET_REPL_INFO"),
("DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION", "DRS_EXT_STRONG_ENCRYPTION"),
("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01", "DRS_EXT_DCINFO_VFFFFFFFF"),
("DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP", "DRS_EXT_TRANSITIVE_MEMBERSHIP"),
("DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY", "DRS_EXT_ADD_SID_HISTORY"),
("DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3", "DRS_EXT_POST_BETA3"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V5", "DRS_EXT_GETCHGREQ_V5"),
("DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2", "DRS_EXT_GETMEMBERSHIPS2"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6", "DRS_EXT_GETCHGREQ_V6"),
("DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS", "DRS_EXT_NONDOMAIN_NCS"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8", "DRS_EXT_GETCHGREQ_V8"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5", "DRS_EXT_GETCHGREPLY_V5"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6", "DRS_EXT_GETCHGREPLY_V6"),
("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3", "DRS_EXT_WHISTLER_BETA3"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7", "DRS_EXT_WHISTLER_BETA3"),
("DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT", "DRS_EXT_WHISTLER_BETA3"),
("DRSUAPI_SUPPORTED_EXTENSION_XPRESS_COMPRESS", "DRS_EXT_W2K3_DEFLATE"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10", "DRS_EXT_GETCHGREQ_V10"),
("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART2", "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART2"),
("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART3", "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART3")
]
optmap_ext = [
("DRSUAPI_SUPPORTED_EXTENSION_ADAM", "DRS_EXT_ADAM"),
("DRSUAPI_SUPPORTED_EXTENSION_LH_BETA2", "DRS_EXT_LH_BETA2"),
("DRSUAPI_SUPPORTED_EXTENSION_RECYCLE_BIN", "DRS_EXT_RECYCLE_BIN")]
self.message("Bind to %s succeeded." % DC)
self.message("Extensions supported:")
for (opt, str) in optmap:
optval = getattr(drsuapi, opt, 0)
if info.info.supported_extensions & optval:
yesno = "Yes"
else:
yesno = "No "
self.message(" %-60s: %s (%s)" % (opt, yesno, str))
if isinstance(info.info, drsuapi.DsBindInfo48):
self.message("\nExtended Extensions supported:")
for (opt, str) in optmap_ext:
optval = getattr(drsuapi, opt, 0)
if info.info.supported_extensions_ext & optval:
yesno = "Yes"
else:
yesno = "No "
self.message(" %-60s: %s (%s)" % (opt, yesno, str))
self.message("\nSite GUID: %s" % info.info.site_guid)
self.message("Repl epoch: %u" % info.info.repl_epoch)
if isinstance(info.info, drsuapi.DsBindInfo48):
self.message("Forest GUID: %s" % info.info.config_dn_guid)
class cmd_drs_options(Command):
"""Query or change 'options' for NTDS Settings object of a Domain Controller."""
synopsis = "%prog [<DC>] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ["DC?"]
takes_options = [
Option("--dsa-option", help="DSA option to enable/disable", type="str",
metavar="{+|-}IS_GC | {+|-}DISABLE_INBOUND_REPL | {+|-}DISABLE_OUTBOUND_REPL | {+|-}DISABLE_NTDSCONN_XLATE" ),
]
option_map = {"IS_GC": 0x00000001,
"DISABLE_INBOUND_REPL": 0x00000002,
"DISABLE_OUTBOUND_REPL": 0x00000004,
"DISABLE_NTDSCONN_XLATE": 0x00000008}
def run(self, DC=None, dsa_option=None,
sambaopts=None, credopts=None, versionopts=None):
self.lp = sambaopts.get_loadparm()
if DC is None:
DC = common.netcmd_dnsname(self.lp)
self.server = DC
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
samdb_connect(self)
ntds_dn = self.samdb.get_dsServiceName()
res = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=["options"])
dsa_opts = int(res[0]["options"][0])
# print out current DSA options
cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts]
self.message("Current DSA options: " + ", ".join(cur_opts))
# modify options
if dsa_option:
if dsa_option[:1] not in ("+", "-"):
raise CommandError("Unknown option %s" % dsa_option)
flag = dsa_option[1:]
if flag not in self.option_map.keys():
raise CommandError("Unknown option %s" % dsa_option)
if dsa_option[:1] == "+":
dsa_opts |= self.option_map[flag]
else:
dsa_opts &= ~self.option_map[flag]
#save new options
m = ldb.Message()
m.dn = ldb.Dn(self.samdb, ntds_dn)
m["options"]= ldb.MessageElement(str(dsa_opts), ldb.FLAG_MOD_REPLACE, "options")
self.samdb.modify(m)
# print out new DSA options
cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts]
self.message("New DSA options: " + ", ".join(cur_opts))
class cmd_drs(SuperCommand):
"""Directory Replication Services (DRS) management."""
subcommands = {}
subcommands["bind"] = cmd_drs_bind()
subcommands["kcc"] = cmd_drs_kcc()
subcommands["replicate"] = cmd_drs_replicate()
subcommands["showrepl"] = cmd_drs_showrepl()
subcommands["options"] = cmd_drs_options()
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a FBcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a FBcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
from .joystickpad import JoystickPad
from .touchdata import TouchData
from kivy.uix.widget import Widget
from kivy.properties import(BooleanProperty, NumericProperty,
ListProperty, ReferenceListProperty)
import math
OUTLINE_ZERO = 0.00000000001
# replaces user's 0 value for outlines, avoids invalid width exception
class Joystick(Widget):
'''The joystick base is comprised of an outer circle & an inner circle.
The joystick pad is another circle,
which the user can move within the base.
All 3 of these elements can be styled independently
to create different effects.
All coordinate properties are based on the
position of the joystick pad.'''
'''####################################################################'''
'''##### > Properties (Customizable) ############################'''
'''####################################################################'''
outer_size = NumericProperty(1)
inner_size = NumericProperty(0.75)
pad_size = NumericProperty(0.5)
'''Sizes are defined by percentage,
1.0 being 100%, of the total widget size.
The smallest value of widget.width & widget.height
is used as a baseline for these percentages.'''
outer_background_color = ListProperty([0.75, 0.75, 0.75, 1])
inner_background_color = ListProperty([0.75, 0.75, 0.75, 1])
pad_background_color = ListProperty([0.4, 0.4, 0.4, 1])
'''Background colors for the joystick base & pad'''
outer_line_color = ListProperty([0.25, 0.25, 0.25, 1])
inner_line_color = ListProperty([0.7, 0.7, 0.7, 1])
pad_line_color = ListProperty([0.35, 0.35, 0.35, 1])
'''Border colors for the joystick base & pad'''
outer_line_width = NumericProperty(0.01)
inner_line_width = NumericProperty(0.01)
pad_line_width = NumericProperty(0.01)
'''Outline widths for the joystick base & pad.
Outline widths are defined by percentage,
1.0 being 100%, of the total widget size.'''
sticky = BooleanProperty(False)
'''When False, the joystick will snap back to center on_touch_up.
When True, the joystick will maintain its final position
at the time of on_touch_up.'''
'''####################################################################'''
'''##### > Properties (Read-Only) ###############################'''
'''####################################################################'''
pad_x = NumericProperty(0.0)
pad_y = NumericProperty(0.0)
pad = ReferenceListProperty(pad_x, pad_y)
'''pad values are touch coordinates in relation to
the center of the joystick.
pad_x & pad_y return values between -1.0 & 1.0.
pad returns a tuple of pad_x & pad_y, and is the best property to
bind to in order to receive updates from the joystick.'''
@property
def magnitude(self):
return self._magnitude
'''distance of the pad, between 0.0 & 1.0,
from the center of the joystick.'''
@property
def radians(self):
return self._radians
'''position of the pad in radians, between 0.0 & 6.283, in relation to the x-axis.'''
@property
def angle(self):
return math.degrees(self.radians)
'''degrees of the pad, between 0.0 & 360.0, in relation to the x-axis.'''
'''magnitude, radians, & angle can be used to
calculate polar coordinates'''
'''####################################################################'''
'''##### > Properties (Private) #################################'''
'''####################################################################'''
_outer_line_width = NumericProperty(OUTLINE_ZERO)
_inner_line_width = NumericProperty(OUTLINE_ZERO)
_pad_line_width = NumericProperty(OUTLINE_ZERO)
_total_diameter = NumericProperty(0)
_total_radius = NumericProperty(0)
_inner_diameter = NumericProperty(0)
_inner_radius = NumericProperty(0)
_outer_diameter = NumericProperty(0)
_outer_radius = NumericProperty(0)
_magnitude = 0
@property
def _radians(self):
if not(self.pad_y and self.pad_x):
return 0
arc_tangent = math.atan(self.pad_y / self.pad_x)
if self.pad_x > 0 and self.pad_y > 0: # 1st Quadrant
return arc_tangent
elif self.pad_x > 0 and self.pad_y < 0: # 4th Quadrant
return (math.pi * 2) + arc_tangent
else: # 2nd & 3rd Quadrants
return math.pi + arc_tangent
@property
def _radius_difference(self):
return (self._total_radius - self.ids.pad._radius)
'''####################################################################'''
'''##### > Pad Control ##########################################'''
'''####################################################################'''
def move_pad(self, touch, from_touch_down):
td = TouchData(self, touch)
if td.is_external and from_touch_down:
touch.ud['joystick'] = None
return False
elif td.in_range:
self._update_coordinates_from_internal_touch(touch, td)
return True
elif not(td.in_range):
self._update_coordinates_from_external_touch(td)
return True
def center_pad(self):
self.ids.pad.center = self.center
self._magnitude = 0
self.pad_x = 0
self.pad_y = 0
def _update_coordinates_from_external_touch(self, touchdata):
td = touchdata
pad_distance = self._radius_difference * (1.0 / td.relative_distance)
x_distance_offset = -td.x_distance * pad_distance
y_distance_offset = -td.y_distance * pad_distance
x = self.center_x + x_distance_offset
y = self.center_y + y_distance_offset
radius_offset = pad_distance / self._radius_difference
self.pad_x = td.x_offset * radius_offset
self.pad_y = td.y_offset * radius_offset
self._magnitude = 1.0
self.ids.pad.center = (x, y)
def _update_coordinates_from_internal_touch(self, touch, touchdata):
td = touchdata
self.pad_x = td.x_offset / self._radius_difference
self.pad_y = td.y_offset / self._radius_difference
self._magnitude = td.relative_distance / \
(self._total_radius - self.ids.pad._radius)
self.ids.pad.center = (touch.x, touch.y)
'''####################################################################'''
'''##### > Layout Events ########################################'''
'''####################################################################'''
def do_layout(self):
if 'pad' in self.ids:
size = min(*self.size)
self._update_outlines(size)
self._update_circles(size)
self._update_pad()
def on_size(self, *args):
self.do_layout()
def on_pos(self, *args):
self.do_layout()
def add_widget(self, widget):
super(Joystick, self).add_widget(widget)
self.do_layout()
def remove_widget(self, widget):
super(Joystick, self).remove_widget(widget)
self.do_layout()
def _update_outlines(self, size):
self._outer_line_width = (self.outer_line_width * size) \
if(self.outer_line_width) else(OUTLINE_ZERO)
self._inner_line_width = (self.inner_line_width * size) \
if(self.inner_line_width) else(OUTLINE_ZERO)
self.ids.pad._line_width = (self.pad_line_width * size) \
if(self.pad_line_width) else(OUTLINE_ZERO)
def _update_circles(self, size):
self._total_diameter = size
self._total_radius = self._total_diameter / 2
self._outer_diameter = \
(self._total_diameter - self._outer_line_width) * self.outer_size
self._outer_radius = self._outer_diameter / 2
self.ids.pad._diameter = self._total_diameter * self.pad_size
self.ids.pad._radius = self.ids.pad._diameter / 2
self._inner_diameter = \
(self._total_diameter - self._inner_line_width) * self.inner_size
self._inner_radius = self._inner_diameter / 2
def _update_pad(self):
self.ids.pad.center = self.center
self.ids.pad._background_color = self.pad_background_color
self.ids.pad._line_color = self.pad_line_color
'''####################################################################'''
'''##### > Touch Events #########################################'''
'''####################################################################'''
def on_touch_down(self, touch):
if self.collide_point(touch.x, touch.y):
touch.ud['joystick'] = self
return self.move_pad(touch, from_touch_down=True)
return super(Joystick, self).on_touch_down(touch)
def on_touch_move(self, touch):
if self._touch_is_active(touch):
return self.move_pad(touch, from_touch_down=False)
return super(Joystick, self).on_touch_move(touch)
def on_touch_up(self, touch):
if self._touch_is_active(touch) and not(self.sticky):
self.center_pad()
return True
return super(Joystick, self).on_touch_up(touch)
def _touch_is_active(self, touch):
return 'joystick' in touch.ud and touch.ud['joystick'] == self
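# Illustrative sketch (not part of the original widget): reading the joystick
# from application code. It assumes a Joystick instance already added to a
# running Kivy widget tree; the values come from the read-only properties
# documented above.
def _example_bind_joystick(joystick):
    def update_coordinates(instance, pad):
        # Cartesian pad values in [-1.0, 1.0] ...
        x, y = pad
        # ... and the equivalent polar coordinates.
        print("x={} y={} magnitude={} angle={}".format(
            x, y, instance.magnitude, instance.angle))
    joystick.bind(pad=update_coordinates)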
|
|
import os
import tempfile
from glob import glob
from os import getcwd
from os.path import basename, exists, join
from nose.tools import raises
from parameterized import param, parameterized
from rsmtool import compute_and_save_predictions
from rsmtool.configuration_parser import Configuration
from rsmtool.test_utils import (check_file_output,
check_generated_output,
check_report,
check_run_prediction,
check_scaled_coefficients,
copy_data_files,
do_run_experiment,
do_run_prediction)
# allow test directory to be set via an environment variable
# which is needed for package testing
TEST_DIR = os.environ.get('TESTDIR', None)
if TEST_DIR:
rsmtool_test_dir = TEST_DIR
else:
from rsmtool.test_utils import rsmtool_test_dir
@parameterized([
param('lr-predict'),
param('lr-predict-with-score'),
param('lr-predict-missing-values', excluded=True),
param('lr-predict-with-subgroups'),
param('lr-predict-with-candidate'),
param('lr-predict-illegal-transformations', excluded=True),
param('lr-predict-tsv-input-files'),
param('lr-predict-xlsx-input-files'),
param('lr-predict-jsonlines-input-files'),
param('lr-predict-nested-jsonlines-input-files'),
param('lr-predict-no-standardization'),
param('lr-predict-with-tsv-output', file_format='tsv'),
param('lr-predict-with-xlsx-output', file_format='xlsx'),
param('logistic-regression-predict'),
param('logistic-regression-predict-expected-scores'),
param('svc-predict-expected-scores'),
param('lr-predict-with-custom-tolerance'),
param('lr-predict-no-tolerance')
])
def test_run_experiment_parameterized(*args, **kwargs):
if TEST_DIR:
kwargs['given_test_dir'] = TEST_DIR
check_run_prediction(*args, **kwargs)
# Check that both rsmtool and rsmpredict generate the same files
def test_run_experiment_lr_rsmtool_and_rsmpredict():
source = 'lr-rsmtool-rsmpredict'
experiment_id = 'lr_rsmtool_rsmpredict'
rsmtool_config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'{}.json'.format(experiment_id))
do_run_experiment(source, experiment_id, rsmtool_config_file)
rsmpredict_config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, rsmpredict_config_file)
output_dir = join('test_outputs', source, 'output')
expected_output_dir = join(rsmtool_test_dir, 'data', 'experiments', source, 'output')
csv_files = glob(join(output_dir, '*.csv'))
html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id))
# Check the results for rsmtool
for csv_file in csv_files:
csv_filename = basename(csv_file)
expected_csv_file = join(expected_output_dir, csv_filename)
if exists(expected_csv_file):
yield check_file_output, csv_file, expected_csv_file
yield check_scaled_coefficients, output_dir, experiment_id
yield check_generated_output, csv_files, experiment_id, 'rsmtool'
yield check_report, html_report, True, False
# check that the rsmpredict generated the same results
for csv_pair in [('predictions.csv',
'{}_pred_processed.csv'.format(experiment_id)),
('preprocessed_features.csv',
'{}_test_preprocessed_features.csv'.format(experiment_id))]:
output_file = join(output_dir, csv_pair[0])
expected_output_file = join(expected_output_dir, csv_pair[1])
yield check_file_output, output_file, expected_output_file
def test_run_experiment_lr_predict_with_object():
"""Test rsmpredict using the Configuration object, rather than a file."""
source = 'lr-predict-object'
configdir = join(rsmtool_test_dir,
'data',
'experiments',
source)
config_dict = {"id_column": "ID",
"input_features_file": "../../files/test.csv",
"experiment_dir": "existing_experiment",
"experiment_id": "lr"
}
config_obj = Configuration(config_dict,
context='rsmpredict',
configdir=configdir)
check_run_prediction(source,
given_test_dir=rsmtool_test_dir,
config_obj_or_dict=config_obj)
def test_run_experiment_lr_predict_with_dictionary():
"""Test rsmpredict using the dictionary object, rather than a file."""
source = 'lr-predict-dict'
# set up a temporary directory since
# we will be using getcwd
temp_dir = tempfile.TemporaryDirectory(prefix=getcwd())
old_file_dict = {'feature_file': 'data/files/test.csv',
'experiment_dir': 'data/experiments/lr-predict-dict/existing_experiment'}
new_file_dict = copy_data_files(temp_dir.name,
old_file_dict,
rsmtool_test_dir)
config_dict = {"id_column": "ID",
"input_features_file": new_file_dict['feature_file'],
"experiment_dir": new_file_dict['experiment_dir'],
"experiment_id": "lr"}
check_run_prediction(source,
given_test_dir=rsmtool_test_dir,
config_obj_or_dict=config_dict)
@raises(ValueError)
def test_run_experiment_lr_predict_with_repeated_ids():
# rsmpredict experiment with non-unique ids
source = 'lr-predict-with-repeated-ids'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(ValueError)
def test_compute_predictions_wrong_input_format():
config_list = [('experiment_id', 'AAAA'),
('train_file', 'some_path')]
with tempfile.TemporaryDirectory() as temp_dir:
compute_and_save_predictions(config_list, temp_dir)
@raises(FileNotFoundError)
def test_run_experiment_lr_predict_missing_model_file():
"""Run rsmpredict experiment with missing model file."""
source = 'lr-predict-missing-model-file'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(FileNotFoundError)
def test_run_experiment_lr_predict_missing_feature_file():
"""Run rsmpredict experiment with missing feature file."""
source = 'lr-predict-missing-feature-file'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(FileNotFoundError)
def test_run_experiment_lr_predict_missing_postprocessing_file():
"""Run rsmpredict experiment with missing post-processing file."""
source = 'lr-predict-missing-postprocessing-file'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(FileNotFoundError)
def test_run_experiment_predict_no_input_feature_file():
"""Run rsmpredict experiment with missing feature file."""
source = 'lr-predict-no-input-feature-file'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(FileNotFoundError)
def test_run_experiment_predict_no_experiment_dir():
"""Run rsmpredict experiment with missing experiment dir."""
source = 'lr-predict-no-experiment-dir'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(FileNotFoundError)
def test_run_experiment_predict_no_output_dir():
"""Run rsmpredict experiment with a missing "output" directory."""
source = 'lr-predict-no-output-dir'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(FileNotFoundError)
def test_run_experiment_predict_no_experiment_id():
"""Run rsmpredict experiment with no experiment ID."""
source = 'lr-predict-no-experiment-id'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(KeyError)
def test_run_experiment_lr_predict_missing_columns():
"""Run rsmpredict experiment with missing columns from the config file."""
source = 'lr-predict-missing-columns'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(KeyError)
def test_run_experiment_lr_predict_missing_feature():
"""Run rsmpredict experiment with missing features."""
source = 'lr-predict-missing-feature'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(ValueError)
def test_run_experiment_lr_predict_no_numeric_feature_values():
"""Run rsmpredict experiment with missing post-processing file."""
source = 'lr-predict-no-numeric-feature-values'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(ValueError)
def test_run_experiment_predict_expected_scores_builtin_model():
"""Run rsmpredict experiment for expected scores with unsupported built-in model."""
source = 'lr-predict-expected-scores-builtin-model'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(ValueError)
def test_run_experiment_predict_expected_scores_wrong_skll_model():
"""Run rsmpredict experiment for expected scores with an unsupported SKLL learner."""
source = 'predict-expected-scores-wrong-skll-model'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
@raises(ValueError)
def test_run_experiment_predict_expected_scores_non_probablistic_svc():
"""Run rsmpredict experiment for expected scores with a non-probabilistic learner."""
source = 'predict-expected-scores-non-probabilistic-svc'
config_file = join(rsmtool_test_dir,
'data',
'experiments',
source,
'rsmpredict.json')
do_run_prediction(source, config_file)
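# Illustrative sketch (not part of the test module): calling rsmpredict
# programmatically the way the dictionary-based test above does. The paths
# and experiment id below are hypothetical placeholders.
def _example_compute_predictions(output_dir):
    config_dict = {"id_column": "ID",
                   "input_features_file": "features.csv",
                   "experiment_dir": "existing_experiment",
                   "experiment_id": "lr"}
    compute_and_save_predictions(config_dict, output_dir)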
|
|
""" Tested with pefile 1.2.10-123 on 32bit PE executable files.
An implementation to push or pop a section header to the section table of a PE file.
For further information refer to the docstrings of pop_back/push_back.
by n0p
"""
import pefile
class SectionDoublePError(Exception):
pass
class SectionDoubleP:
def __init__(self, pe):
self.pe = pe
def __adjust_optional_header(self):
""" Recalculates the SizeOfImage, SizeOfCode, SizeOfInitializedData and
SizeOfUninitializedData of the optional header.
"""
# SizeOfImage = ((VirtualAddress + VirtualSize) of the new last section)
self.pe.OPTIONAL_HEADER.SizeOfImage = (self.pe.sections[-1].VirtualAddress +
self.pe.sections[-1].Misc_VirtualSize)
self.pe.OPTIONAL_HEADER.SizeOfCode = 0
self.pe.OPTIONAL_HEADER.SizeOfInitializedData = 0
self.pe.OPTIONAL_HEADER.SizeOfUninitializedData = 0
# Recalculating the sizes by iterating over every section and checking if
# the appropriate characteristics are set.
for section in self.pe.sections:
if section.Characteristics & 0x00000020:
# Section contains code.
self.pe.OPTIONAL_HEADER.SizeOfCode += section.SizeOfRawData
if section.Characteristics & 0x00000040:
# Section contains initialized data.
self.pe.OPTIONAL_HEADER.SizeOfInitializedData += section.SizeOfRawData
if section.Characteristics & 0x00000080:
# Section contains uninitialized data.
self.pe.OPTIONAL_HEADER.SizeOfUninitializedData += section.SizeOfRawData
def __add_header_space(self):
""" To make space for a new section header a buffer filled with nulls is added at the
end of the headers. The buffer has the size of one file alignment.
The data between the last section header and the end of the headers is copied to
the new space (everything moved by the size of one file alignment). If any data
directory entry points to the moved data the pointer is adjusted.
"""
FileAlignment = self.pe.OPTIONAL_HEADER.FileAlignment
SizeOfHeaders = self.pe.OPTIONAL_HEADER.SizeOfHeaders
data = '\x00' * FileAlignment
# Adding the null buffer.
self.pe.__data__ = (self.pe.__data__[:SizeOfHeaders] + data +
self.pe.__data__[SizeOfHeaders:])
section_table_offset = (self.pe.DOS_HEADER.e_lfanew + 4 +
self.pe.FILE_HEADER.sizeof() + self.pe.FILE_HEADER.SizeOfOptionalHeader)
# Copying the data between the last section header and SizeOfHeaders to the newly allocated
# space.
new_section_offset = section_table_offset + self.pe.FILE_HEADER.NumberOfSections*0x28
size = SizeOfHeaders - new_section_offset
data = self.pe.get_data(new_section_offset, size)
self.pe.set_bytes_at_offset(new_section_offset + FileAlignment, data)
# Filling the space, from which the data was copied from, with NULLs.
self.pe.set_bytes_at_offset(new_section_offset, '\x00' * FileAlignment)
data_directory_offset = section_table_offset - self.pe.OPTIONAL_HEADER.NumberOfRvaAndSizes * 0x8
# Checking data directories if anything points to the space between the last section header
# and the former SizeOfHeaders. If that's the case the pointer is increased by FileAlignment.
for data_offset in xrange(data_directory_offset, section_table_offset, 0x8):
data_rva = self.pe.get_dword_from_offset(data_offset)
if new_section_offset <= data_rva and data_rva < SizeOfHeaders:
self.pe.set_dword_at_offset(data_offset, data_rva + FileAlignment)
SizeOfHeaders_offset = (self.pe.DOS_HEADER.e_lfanew + 4 +
self.pe.FILE_HEADER.sizeof() + 0x3C)
# Adjusting the SizeOfHeaders value.
self.pe.set_dword_at_offset(SizeOfHeaders_offset, SizeOfHeaders + FileAlignment)
section_raw_address_offset = section_table_offset + 0x14
# The raw addresses of the sections are adjusted.
for section in self.pe.sections:
if section.PointerToRawData != 0:
self.pe.set_dword_at_offset(section_raw_address_offset, section.PointerToRawData+FileAlignment)
section_raw_address_offset += 0x28
# All changes in this method were made to the raw data (__data__). To make these changes
# accessible in self.pe, __data__ has to be parsed again. Since a new pefile is parsed during
# the init method, the easiest way is to replace self.pe with a new pefile based on __data__
# of the old self.pe.
self.pe = pefile.PE(data=self.pe.__data__)
def __is_null_data(self, data):
""" Checks if the given data contains just null bytes.
"""
for char in data:
if char != '\x00':
return False
return True
def push_back(self, Name=".NewSec", VirtualSize=0x00000000, VirtualAddress=0x00000000,
RawSize=0x00000000, RawAddress=0x00000000, RelocAddress=0x00000000,
Linenumbers=0x00000000, RelocationsNumber=0x0000, LinenumbersNumber=0x0000,
Characteristics=0xE00000E0, Data=""):
""" Adds the section, specified by the functions parameters, at the end of the section
table.
If the space to add an additional section header is insufficient, a buffer is inserted
after SizeOfHeaders. Data between the last section header and the end of SizeOfHeaders
is copied to +1 FileAlignment. Data directory entries pointing to this data are fixed.
        A call with no parameters creates the same section header as LordPE does, but for the
        binary to be executable without errors a VirtualSize > 0 has to be set.
        If a RawSize > 0 is set or Data is given, the data is aligned to the FileAlignment and
        appended at the end of the file.
"""
if self.pe.FILE_HEADER.NumberOfSections == len(self.pe.sections):
FileAlignment = self.pe.OPTIONAL_HEADER.FileAlignment
SectionAlignment = self.pe.OPTIONAL_HEADER.SectionAlignment
if len(Name) > 8:
raise SectionDoublePError("The name is too long for a section.")
if ( VirtualAddress < (self.pe.sections[-1].Misc_VirtualSize +
self.pe.sections[-1].VirtualAddress)
or VirtualAddress % SectionAlignment != 0):
if (self.pe.sections[-1].Misc_VirtualSize % SectionAlignment) != 0:
VirtualAddress = \
(self.pe.sections[-1].VirtualAddress + self.pe.sections[-1].Misc_VirtualSize -
(self.pe.sections[-1].Misc_VirtualSize % SectionAlignment) + SectionAlignment)
else:
VirtualAddress = \
(self.pe.sections[-1].VirtualAddress + self.pe.sections[-1].Misc_VirtualSize)
if VirtualSize < len(Data):
VirtualSize = len(Data)
if (len(Data) % FileAlignment) != 0:
# Padding the data of the section.
Data += '\x00' * (FileAlignment - (len(Data) % FileAlignment))
if RawSize != len(Data):
if ( RawSize > len(Data)
and (RawSize % FileAlignment) == 0):
Data += '\x00' * (RawSize - (len(Data) % RawSize))
else:
RawSize = len(Data)
section_table_offset = (self.pe.DOS_HEADER.e_lfanew + 4 +
self.pe.FILE_HEADER.sizeof() + self.pe.FILE_HEADER.SizeOfOptionalHeader)
# If the new section header exceeds the SizeOfHeaders there won't be enough space
            # for an additional section header. Besides that, it is checked whether the 0x28 bytes
            # (size of one section header) after the last current section header are filled
            # with nulls, i.e. are free to use.
if ( self.pe.OPTIONAL_HEADER.SizeOfHeaders <
section_table_offset + (self.pe.FILE_HEADER.NumberOfSections+1)*0x28
or not self.__is_null_data(self.pe.get_data(section_table_offset +
(self.pe.FILE_HEADER.NumberOfSections)*0x28, 0x28))):
# Checking if more space can be added.
if self.pe.OPTIONAL_HEADER.SizeOfHeaders < self.pe.sections[0].VirtualAddress:
self.__add_header_space()
else:
raise SectionDoublePError("No more space can be added for the section header.")
# The validity check of RawAddress is done after space for a new section header may
# have been added because if space had been added the PointerToRawData of the previous
# section would have changed.
if (RawAddress != (self.pe.sections[-1].PointerToRawData +
self.pe.sections[-1].SizeOfRawData)):
RawAddress = \
(self.pe.sections[-1].PointerToRawData + self.pe.sections[-1].SizeOfRawData)
# Appending the data of the new section to the file.
if len(Data) > 0:
self.pe.__data__ = (self.pe.__data__[:RawAddress] + Data + \
self.pe.__data__[RawAddress:])
section_offset = section_table_offset + self.pe.FILE_HEADER.NumberOfSections*0x28
# Manually writing the data of the section header to the file.
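            # For reference, the standard IMAGE_SECTION_HEADER layout (offsets relative to the
            # start of the header): 0x00 Name (8 bytes), 0x08 VirtualSize, 0x0C VirtualAddress,
            # 0x10 SizeOfRawData, 0x14 PointerToRawData, 0x18 PointerToRelocations,
            # 0x1C PointerToLinenumbers, 0x20 NumberOfRelocations (word),
            # 0x22 NumberOfLinenumbers (word), 0x24 Characteristics.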
self.pe.set_bytes_at_offset(section_offset, Name)
self.pe.set_dword_at_offset(section_offset+0x08, VirtualSize)
self.pe.set_dword_at_offset(section_offset+0x0C, VirtualAddress)
self.pe.set_dword_at_offset(section_offset+0x10, RawSize)
self.pe.set_dword_at_offset(section_offset+0x14, RawAddress)
self.pe.set_dword_at_offset(section_offset+0x18, RelocAddress)
self.pe.set_dword_at_offset(section_offset+0x1C, Linenumbers)
self.pe.set_word_at_offset(section_offset+0x20, RelocationsNumber)
self.pe.set_word_at_offset(section_offset+0x22, LinenumbersNumber)
self.pe.set_dword_at_offset(section_offset+0x24, Characteristics)
self.pe.FILE_HEADER.NumberOfSections +=1
# Parsing the section table of the file again to add the new section to the sections
# list of pefile.
self.pe.parse_sections(section_table_offset)
self.__adjust_optional_header()
else:
raise SectionDoublePError("The NumberOfSections specified in the file header and the " +
"size of the sections list of pefile don't match.")
return self.pe
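# Illustrative usage sketch (not part of the original module). The wrapper class
# name SectionDoubleP and the file names below are assumptions for the example.
if __name__ == '__main__':
    import pefile
    pe = pefile.PE('input.exe')
    sections = SectionDoubleP(pe)
    # Append an executable section padded with NOPs; a VirtualSize > 0 is
    # required for the binary to run, as noted in push_back()'s docstring.
    pe = sections.push_back(Name='.newsec', VirtualSize=0x1000,
                            Data='\x90' * 0x1000)
    pe.write('output.exe')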
|
|
# coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from distutils.util import strtobool
import functools
from functools import partial
import re
import itertools as it
import os
from typing import Dict, Sequence, Union
import sys
import unittest
import warnings
import zlib
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import numpy.random as npr
import scipy
import tensorflow.compat.v2 as tf
from trax.tf_numpy.jax_tests.config import flags, bool_env
import trax.tf_numpy.extensions as npe
import trax.tf_numpy.numpy as tf_np
tree_map = tf.nest.map_structure
tree_multimap = tf.nest.map_structure
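# check_eq() and inner_prod() below rely on tree_all/tree_reduce, which are not
# defined or imported in this file. A minimal sketch of the missing helpers,
# assuming jax.tree_util-like semantics over tf.nest-flattened leaves:
def tree_reduce(fn, tree):
  return functools.reduce(fn, tf.nest.flatten(tree))
def tree_all(tree):
  return all(tf.nest.flatten(tree))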
FLAGS = flags.FLAGS
# TODO(wangpeng): Remove this flag after broken tests are fixed
flags.DEFINE_bool('enable_x64',
strtobool('False'),
'Enable 64-bit types to be used.')
flags.DEFINE_enum(
'test_dut', '',
enum_values=['', 'cpu', 'gpu', 'tpu'],
help=
'Describes the device under test in case special consideration is required.'
)
flags.DEFINE_integer(
'num_generated_cases',
10,
help='Number of generated cases to test')
EPS = 1e-4
# Default dtypes corresponding to Python scalars.
python_scalar_dtypes = {
bool: onp.dtype(onp.bool_),
int: onp.dtype(onp.int_),
float: onp.dtype(onp.float_),
complex: onp.dtype(onp.complex_),
}
def _dtype(x):
if isinstance(x, tf.Tensor):
return x.dtype.as_numpy_dtype
return (getattr(x, 'dtype', None) or
onp.dtype(python_scalar_dtypes.get(type(x), None)) or
onp.asarray(x).dtype)
def is_sequence(x):
try:
iter(x)
except TypeError:
return False
else:
return True
_default_tolerance = {
onp.dtype(onp.bool_): 0,
onp.dtype(onp.int8): 0,
onp.dtype(onp.int16): 0,
onp.dtype(onp.int32): 0,
onp.dtype(onp.int64): 0,
onp.dtype(onp.uint8): 0,
onp.dtype(onp.uint16): 0,
onp.dtype(onp.uint32): 0,
onp.dtype(onp.uint64): 0,
# TODO(b/154768983): onp.dtype(dtypes.bfloat16): 1e-2,
onp.dtype(onp.float16): 1e-3,
onp.dtype(onp.float32): 1e-6,
onp.dtype(onp.float64): 1e-15,
onp.dtype(onp.complex64): 1e-6,
onp.dtype(onp.complex128): 1e-15,
}
def default_tolerance():
return _default_tolerance
default_gradient_tolerance = {
# TODO(b/154768983): onp.dtype(dtypes.bfloat16): 1e-1,
onp.dtype(onp.float16): 1e-2,
onp.dtype(onp.float32): 2e-3,
onp.dtype(onp.float64): 1e-5,
onp.dtype(onp.complex64): 1e-3,
onp.dtype(onp.complex128): 1e-5,
}
def _assert_numpy_allclose(a, b, atol=None, rtol=None):
# TODO(b/154768983):
# a = a.astype(onp.float32) if a.dtype == dtypes.bfloat16 else a
# b = b.astype(onp.float32) if b.dtype == dtypes.bfloat16 else b
kw = {}
if atol: kw["atol"] = atol
if rtol: kw["rtol"] = rtol
onp.testing.assert_allclose(a, b, **kw)
def tolerance(dtype, tol=None):
tol = {} if tol is None else tol
if not isinstance(tol, dict):
return tol
tol = {onp.dtype(key): value for key, value in tol.items()}
dtype = onp.dtype(dtype)
return tol.get(dtype, default_tolerance()[dtype])
def _normalize_tolerance(tol):
tol = tol or 0
if isinstance(tol, dict):
return {onp.dtype(k): v for k, v in tol.items()}
else:
return {k: tol for k in _default_tolerance}
def join_tolerance(tol1, tol2):
tol1 = _normalize_tolerance(tol1)
tol2 = _normalize_tolerance(tol2)
out = tol1
for k, v in tol2.items():
out[k] = max(v, tol1.get(k, 0))
return out
def _assert_numpy_close(a, b, atol=None, rtol=None):
assert a.shape == b.shape
atol = max(tolerance(a.dtype, atol), tolerance(b.dtype, atol))
rtol = max(tolerance(a.dtype, rtol), tolerance(b.dtype, rtol))
_assert_numpy_allclose(a, b, atol=atol * a.size, rtol=rtol * b.size)
def check_eq(xs, ys):
tree_all(tree_multimap(_assert_numpy_allclose, xs, ys))
def check_close(xs, ys, atol=None, rtol=None):
assert_close = partial(_assert_numpy_close, atol=atol, rtol=rtol)
tree_all(tree_multimap(assert_close, xs, ys))
def inner_prod(xs, ys):
def contract(x, y):
return onp.real(onp.dot(onp.conj(x).reshape(-1), y.reshape(-1)))
return tree_reduce(onp.add, tree_multimap(contract, xs, ys))
add = partial(tree_multimap, lambda x, y: onp.add(x, y, dtype=_dtype(x)))
sub = partial(tree_multimap, lambda x, y: onp.subtract(x, y, dtype=_dtype(x)))
conj = partial(tree_map, lambda x: onp.conj(x, dtype=_dtype(x)))
def scalar_mul(xs, a):
return tree_map(lambda x: onp.multiply(x, a, dtype=_dtype(x)), xs)
def rand_like(rng, x):
shape = onp.shape(x)
dtype = _dtype(x)
randn = lambda: onp.asarray(rng.randn(*shape), dtype=dtype)
if onp.issubdtype(dtype, onp.complexfloating):
return randn() + dtype.type(1.0j) * randn()
else:
return randn()
def numerical_jvp(f, primals, tangents, eps=EPS):
delta = scalar_mul(tangents, eps)
f_pos = f(*add(primals, delta))
f_neg = f(*sub(primals, delta))
return scalar_mul(sub(f_pos, f_neg), 0.5 / eps)
def _merge_tolerance(tol, default):
if tol is None:
return default
if not isinstance(tol, dict):
return tol
out = default.copy()
for k, v in tol.items():
out[onp.dtype(k)] = v
return out
def check_jvp(f, f_jvp, args, atol=None, rtol=None, eps=EPS):
atol = _merge_tolerance(atol, default_gradient_tolerance)
rtol = _merge_tolerance(rtol, default_gradient_tolerance)
rng = onp.random.RandomState(0)
tangent = tree_map(partial(rand_like, rng), args)
v_out, t_out = f_jvp(args, tangent)
v_out_expected = f(*args)
t_out_expected = numerical_jvp(f, args, tangent, eps=eps)
# In principle we should expect exact equality of v_out and v_out_expected,
# but due to nondeterminism especially on GPU (e.g., due to convolution
# autotuning) we only require "close".
check_close(v_out, v_out_expected, atol=atol, rtol=rtol)
check_close(t_out, t_out_expected, atol=atol, rtol=rtol)
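# Illustrative sketch (not part of the original module): exercising check_jvp()
# on f(x) = x**2, whose JVP is known analytically. square_jvp is a hypothetical
# hand-written JVP in the (primals, tangents) -> (out, tangent_out) convention
# expected by check_jvp.
def _example_check_jvp():
  f = lambda x: x ** 2
  def square_jvp(primals, tangents):
    (x,), (t,) = primals, tangents
    return f(x), 2 * x * t
  x = onp.linspace(1., 2., 5).astype(onp.float32)
  check_jvp(f, square_jvp, (x,))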
def check_vjp(f, f_vjp, args, atol=None, rtol=None, eps=EPS):
atol = _merge_tolerance(atol, default_gradient_tolerance)
rtol = _merge_tolerance(rtol, default_gradient_tolerance)
_rand_like = partial(rand_like, onp.random.RandomState(0))
v_out, vjpfun = f_vjp(*args)
v_out_expected = f(*args)
check_close(v_out, v_out_expected, atol=atol, rtol=rtol)
tangent = tree_map(_rand_like, args)
tangent_out = numerical_jvp(f, args, tangent, eps=eps)
cotangent = tree_map(_rand_like, v_out)
cotangent_out = conj(vjpfun(conj(cotangent)))
ip = inner_prod(tangent, cotangent_out)
ip_expected = inner_prod(tangent_out, cotangent)
check_close(ip, ip_expected, atol=atol, rtol=rtol)
@contextmanager
def count_primitive_compiles():
xla.xla_primitive_callable.cache_clear()
# We count how many times we call primitive_computation (which is called
# inside xla_primitive_callable) instead of xla_primitive_callable so we don't
# count cache hits.
primitive_computation = xla.primitive_computation
count = [0]
def primitive_computation_and_count(*args, **kwargs):
count[0] += 1
return primitive_computation(*args, **kwargs)
xla.primitive_computation = primitive_computation_and_count
try:
yield count
finally:
xla.primitive_computation = primitive_computation
@contextmanager
def count_jit_and_pmap_compiles():
# No need to clear any caches since we generally jit and pmap fresh callables
# in tests.
jaxpr_subcomp = xla.jaxpr_subcomp
count = [0]
def jaxpr_subcomp_and_count(*args, **kwargs):
count[0] += 1
return jaxpr_subcomp(*args, **kwargs)
xla.jaxpr_subcomp = jaxpr_subcomp_and_count
try:
yield count
finally:
xla.jaxpr_subcomp = jaxpr_subcomp
def device_under_test():
return FLAGS.test_dut
def if_device_under_test(device_type: Union[str, Sequence[str]],
if_true, if_false):
"""Chooses `if_true` of `if_false` based on device_under_test."""
if device_under_test() in ([device_type] if isinstance(device_type, str)
else device_type):
return if_true
else:
return if_false
def supported_dtypes():
if device_under_test() == "tpu":
return {onp.bool_, onp.int32, onp.uint32, dtypes.bfloat16, onp.float32,
onp.complex64}
else:
return {onp.bool_, onp.int8, onp.int16, onp.int32, onp.int64,
onp.uint8, onp.uint16, onp.uint32, onp.uint64,
dtypes.bfloat16, onp.float16, onp.float32, onp.float64,
onp.complex64, onp.complex128}
def skip_if_unsupported_type(dtype):
if dtype not in supported_dtypes():
raise unittest.SkipTest(
f"Type {dtype} not supported on {device_under_test()}")
def skip_on_devices(*disabled_devices):
"""A decorator for test methods to skip the test on certain devices."""
def skip(test_method):
@functools.wraps(test_method)
def test_method_wrapper(self, *args, **kwargs):
device = device_under_test()
if device in disabled_devices:
test_name = getattr(test_method, '__name__', '[unknown test]')
raise unittest.SkipTest(
f"{test_name} not supported on {device.upper()}.")
return test_method(self, *args, **kwargs)
return test_method_wrapper
return skip
def skip_on_flag(flag_name, skip_value):
"""A decorator for test methods to skip the test when flags are set."""
def skip(test_method): # pylint: disable=missing-docstring
@functools.wraps(test_method)
def test_method_wrapper(self, *args, **kwargs):
flag_value = getattr(FLAGS, flag_name)
if flag_value == skip_value:
test_name = getattr(test_method, '__name__', '[unknown test]')
raise unittest.SkipTest(
f"{test_name} not supported when FLAGS.{flag_name} is {flag_value}")
return test_method(self, *args, **kwargs)
return test_method_wrapper
return skip
# TODO(phawkins): workaround for bug https://github.com/google/jax/issues/432
# Delete this code after the minimum jaxlib version is 0.1.46 or greater.
skip_on_mac_linalg_bug = partial(
unittest.skipIf,
(sys.platform == "darwin" and scipy.version.version > "1.1.0" and
lib.version < (0, 1, 46)),
"Test fails on Mac with new scipy (issue #432)")
def format_test_name_suffix(opname, shapes, dtypes):
arg_descriptions = (format_shape_dtype_string(shape, dtype)
for shape, dtype in zip(shapes, dtypes))
return '{}_{}'.format(opname.capitalize(), '_'.join(arg_descriptions))
# We use special symbols, represented as singleton objects, to distinguish
# between NumPy scalars, Python scalars, and 0-D arrays.
class ScalarShape(object):
def __len__(self): return 0
class _NumpyScalar(ScalarShape): pass
class _PythonScalar(ScalarShape): pass
NUMPY_SCALAR_SHAPE = _NumpyScalar()
PYTHON_SCALAR_SHAPE = _PythonScalar()
def _dims_of_shape(shape):
"""Converts `shape` to a tuple of dimensions."""
if type(shape) in (list, tuple):
return shape
elif isinstance(shape, ScalarShape):
return ()
else:
raise TypeError(type(shape))
def _cast_to_shape(value, shape, dtype):
"""Casts `value` to the correct Python type for `shape` and `dtype`."""
if shape is NUMPY_SCALAR_SHAPE:
# explicitly cast to NumPy scalar in case `value` is a Python scalar.
return onp.dtype(dtype).type(value)
elif shape is PYTHON_SCALAR_SHAPE:
# explicitly cast to Python scalar via https://stackoverflow.com/a/11389998
return onp.asarray(value).item()
elif type(shape) in (list, tuple):
assert onp.shape(value) == tuple(shape)
return value
else:
raise TypeError(type(shape))
def dtype_str(dtype):
return onp.dtype(dtype).name
def format_shape_dtype_string(shape, dtype):
if shape is NUMPY_SCALAR_SHAPE:
return dtype_str(dtype)
elif shape is PYTHON_SCALAR_SHAPE:
return 'py' + dtype_str(dtype)
elif type(shape) in (list, tuple):
shapestr = ','.join(str(dim) for dim in shape)
return '{}[{}]'.format(dtype_str(dtype), shapestr)
elif type(shape) is int:
return '{}[{},]'.format(dtype_str(dtype), shape)
elif isinstance(shape, onp.ndarray):
return '{}[{}]'.format(dtype_str(dtype), shape)
else:
raise TypeError(type(shape))
def _rand_dtype(rand, shape, dtype, scale=1., post=lambda x: x):
"""Produce random values given shape, dtype, scale, and post-processor.
Args:
rand: a function for producing random values of a given shape, e.g. a
bound version of either onp.RandomState.randn or onp.RandomState.rand.
shape: a shape value as a tuple of positive integers.
dtype: a numpy dtype.
scale: optional, a multiplicative scale for the random values (default 1).
post: optional, a callable for post-processing the random values (default
identity).
Returns:
An ndarray of the given shape and dtype using random values based on a call
to rand but scaled, converted to the appropriate dtype, and post-processed.
"""
r = lambda: onp.asarray(scale * rand(*_dims_of_shape(shape)), dtype)
if onp.issubdtype(dtype, onp.complexfloating):
vals = r() + 1.0j * r()
else:
vals = r()
return _cast_to_shape(onp.asarray(post(vals), dtype), shape, dtype)
def rand_default(scale=3):
randn = npr.RandomState(0).randn
return partial(_rand_dtype, randn, scale=scale)
def rand_nonzero():
post = lambda x: onp.where(x == 0, onp.array(1, dtype=x.dtype), x)
randn = npr.RandomState(0).randn
return partial(_rand_dtype, randn, scale=3, post=post)
def rand_positive():
post = lambda x: x + 1
rand = npr.RandomState(0).rand
return partial(_rand_dtype, rand, scale=2, post=post)
def rand_small():
randn = npr.RandomState(0).randn
return partial(_rand_dtype, randn, scale=1e-3)
def rand_not_small(offset=10.):
post = lambda x: x + onp.where(x > 0, offset, -offset)
randn = npr.RandomState(0).randn
return partial(_rand_dtype, randn, scale=3., post=post)
def rand_small_positive():
rand = npr.RandomState(0).rand
return partial(_rand_dtype, rand, scale=2e-5)
def rand_uniform(low=0.0, high=1.0):
assert low < high
rand = npr.RandomState(0).rand
post = lambda x: x * (high - low) + low
return partial(_rand_dtype, rand, post=post)
def rand_some_equal():
randn = npr.RandomState(0).randn
rng = npr.RandomState(0)
def post(x):
x_ravel = x.ravel()
if len(x_ravel) == 0:
return x
flips = rng.rand(*onp.shape(x)) < 0.5
return onp.where(flips, x_ravel[0], x)
return partial(_rand_dtype, randn, scale=100., post=post)
def rand_some_inf():
"""Return a random sampler that produces infinities in floating types."""
rng = npr.RandomState(1)
base_rand = rand_default()
"""
TODO: Complex numbers are not correctly tested
If blocks should be switched in order, and relevant tests should be fixed
"""
def rand(shape, dtype):
"""The random sampler function."""
if not onp.issubdtype(dtype, onp.floating):
# only float types have inf
return base_rand(shape, dtype)
if onp.issubdtype(dtype, onp.complexfloating):
base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
onp.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
dims = _dims_of_shape(shape)
posinf_flips = rng.rand(*dims) < 0.1
neginf_flips = rng.rand(*dims) < 0.1
vals = base_rand(shape, dtype)
vals = onp.where(posinf_flips, onp.array(onp.inf, dtype=dtype), vals)
vals = onp.where(neginf_flips, onp.array(-onp.inf, dtype=dtype), vals)
return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)
return rand
def rand_some_nan():
"""Return a random sampler that produces nans in floating types."""
rng = npr.RandomState(1)
base_rand = rand_default()
def rand(shape, dtype):
"""The random sampler function."""
if onp.issubdtype(dtype, onp.complexfloating):
base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
onp.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
if not onp.issubdtype(dtype, onp.floating):
      # only float types have nan
return base_rand(shape, dtype)
dims = _dims_of_shape(shape)
nan_flips = rng.rand(*dims) < 0.1
vals = base_rand(shape, dtype)
vals = onp.where(nan_flips, onp.array(onp.nan, dtype=dtype), vals)
return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)
return rand
def rand_some_inf_and_nan():
"""Return a random sampler that produces infinities in floating types."""
rng = npr.RandomState(1)
base_rand = rand_default()
"""
TODO: Complex numbers are not correctly tested
If blocks should be switched in order, and relevant tests should be fixed
"""
def rand(shape, dtype):
"""The random sampler function."""
if not onp.issubdtype(dtype, onp.floating):
# only float types have inf
return base_rand(shape, dtype)
if onp.issubdtype(dtype, onp.complexfloating):
base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
onp.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
dims = _dims_of_shape(shape)
posinf_flips = rng.rand(*dims) < 0.1
neginf_flips = rng.rand(*dims) < 0.1
nan_flips = rng.rand(*dims) < 0.1
vals = base_rand(shape, dtype)
vals = onp.where(posinf_flips, onp.array(onp.inf, dtype=dtype), vals)
vals = onp.where(neginf_flips, onp.array(-onp.inf, dtype=dtype), vals)
vals = onp.where(nan_flips, onp.array(onp.nan, dtype=dtype), vals)
return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)
return rand
# TODO(mattjj): doesn't handle complex types
def rand_some_zero():
"""Return a random sampler that produces some zeros."""
rng = npr.RandomState(1)
base_rand = rand_default()
def rand(shape, dtype):
"""The random sampler function."""
dims = _dims_of_shape(shape)
zeros = rng.rand(*dims) < 0.5
vals = base_rand(shape, dtype)
vals = onp.where(zeros, onp.array(0, dtype=dtype), vals)
return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)
return rand
def rand_int(low, high=None):
randint = npr.RandomState(0).randint
def fn(shape, dtype):
return randint(low, high=high, size=shape, dtype=dtype)
return fn
def rand_unique_int():
randchoice = npr.RandomState(0).choice
def fn(shape, dtype):
return randchoice(onp.arange(onp.prod(shape), dtype=dtype),
size=shape, replace=False)
return fn
def rand_bool():
rng = npr.RandomState(0)
def generator(shape, dtype):
return _cast_to_shape(rng.rand(*_dims_of_shape(shape)) < 0.5, shape, dtype)
return generator
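# Illustrative sketch (not part of the original module): each rand_* factory
# above returns a sampler taking (shape, dtype).
def _example_samplers():
  dense = rand_default()((3, 4), onp.float32)
  sparse = rand_some_zero()((3, 4), onp.float32)
  ints = rand_int(0, 10)((5,), onp.int32)
  flags_ = rand_bool()((2, 2), onp.bool_)
  return dense, sparse, ints, flags_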
def check_raises(thunk, err_type, msg):
try:
thunk()
assert False
except err_type as e:
assert str(e).startswith(msg), "\n{}\n\n{}\n".format(e, msg)
def check_raises_regexp(thunk, err_type, pattern):
try:
thunk()
assert False
except err_type as e:
assert re.match(pattern, str(e)), "{}\n\n{}\n".format(e, pattern)
def _iter_eqns(jaxpr):
# TODO(necula): why doesn't this search in params?
for eqn in jaxpr.eqns:
yield eqn
for subjaxpr in core.subjaxprs(jaxpr):
yield from _iter_eqns(subjaxpr)
def assert_dot_precision(expected_precision, fun, *args):
jaxpr = api.make_jaxpr(fun)(*args)
precisions = [eqn.params['precision'] for eqn in _iter_eqns(jaxpr.jaxpr)
if eqn.primitive == lax.dot_general_p]
for precision in precisions:
msg = "Unexpected precision: {} != {}".format(expected_precision, precision)
assert precision == expected_precision, msg
_CACHED_INDICES: Dict[int, Sequence[int]] = {}
def cases_from_list(xs):
xs = list(xs)
n = len(xs)
k = min(n, FLAGS.num_generated_cases)
# Random sampling for every parameterized test is expensive. Do it once and
# cache the result.
indices = _CACHED_INDICES.get(n)
if indices is None:
rng = npr.RandomState(42)
_CACHED_INDICES[n] = indices = rng.permutation(n)
return [xs[i] for i in indices[:k]]
def cases_from_gens(*gens):
sizes = [1, 3, 10]
cases_per_size = int(FLAGS.num_generated_cases / len(sizes)) + 1
for size in sizes:
for i in range(cases_per_size):
yield ('_{}_{}'.format(size, i),) + tuple(gen(size) for gen in gens)
def to_np(a):
return tf.nest.map_structure(tf_np.asarray, a)
def to_tf_fn(f):
return lambda *args: f(*to_np(args))
class TestCase(parameterized.TestCase):
"""Base class for tests including numerical checks and boilerplate."""
# copied from jax.test_util
def setUp(self):
super().setUp()
self._rng = npr.RandomState(zlib.adler32(self._testMethodName.encode()))
# copied from jax.test_util
def rng(self):
return self._rng
# TODO(mattjj): this obscures the error messages from failures, figure out how
# to re-enable it
# def tearDown(self) -> None:
# assert core.reset_trace_state()
def assertArraysAllClose(self, x, y, check_dtypes, atol=None, rtol=None):
"""Assert that x and y are close (up to numerical tolerances)."""
self.assertEqual(x.shape, y.shape)
atol = max(tolerance(_dtype(x), atol), tolerance(_dtype(y), atol))
rtol = max(tolerance(_dtype(x), rtol), tolerance(_dtype(y), rtol))
_assert_numpy_allclose(x, y, atol=atol, rtol=rtol)
if check_dtypes:
self.assertDtypesMatch(x, y)
def assertDtypesMatch(self, x, y):
if FLAGS.enable_x64:
self.assertEqual(_dtype(x), _dtype(y))
def assertAllClose(self, x, y, check_dtypes, atol=None, rtol=None):
"""Assert that x and y, either arrays or nested tuples/lists, are close."""
if isinstance(x, dict):
self.assertIsInstance(y, dict)
self.assertEqual(set(x.keys()), set(y.keys()))
for k in x:
self.assertAllClose(x[k], y[k], check_dtypes, atol=atol, rtol=rtol)
elif is_sequence(x) and not hasattr(x, '__array__'):
self.assertTrue(is_sequence(y) and not hasattr(y, '__array__'))
self.assertEqual(len(x), len(y))
for x_elt, y_elt in zip(x, y):
self.assertAllClose(x_elt, y_elt, check_dtypes, atol=atol, rtol=rtol)
elif hasattr(x, '__array__') or onp.isscalar(x):
self.assertTrue(hasattr(y, '__array__') or onp.isscalar(y))
if check_dtypes:
self.assertDtypesMatch(x, y)
x = onp.asarray(x)
y = onp.asarray(y)
self.assertArraysAllClose(x, y, check_dtypes=False, atol=atol, rtol=rtol)
elif x == y:
return
else:
raise TypeError((type(x), type(y)))
def assertMultiLineStrippedEqual(self, expected, what):
"""Asserts two strings are equal, after stripping each line."""
ignore_space_re = re.compile(r'\s*\n\s*')
expected_clean = re.sub(ignore_space_re, '\n', expected.strip())
what_clean = re.sub(ignore_space_re, '\n', what.strip())
self.assertMultiLineEqual(expected_clean, what_clean,
msg="Found\n{}\nExpecting\n{}".format(what, expected))
def _CheckAgainstNumpy(self, numpy_reference_op, lax_op, args_maker,
check_dtypes=True, tol=None):
args = args_maker()
lax_ans = lax_op(*args)
numpy_ans = numpy_reference_op(*args)
self.assertAllClose(numpy_ans, lax_ans, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
def _CompileAndCheck(self,
fun,
args_maker,
check_dtypes=True,
rtol=None,
atol=None,
check_eval_on_shapes=True,
check_incomplete_shape=True,
check_unknown_rank=True,
static_argnums=(),
check_experimental_compile=True,
check_xla_forced_compile=True):
"""Compiles the function and checks the results.
Args:
fun: the function to be checked.
args_maker: a callable that returns a tuple which will be used as the
positional arguments.
check_dtypes: whether to check that the result dtypes from non-compiled
and compiled runs agree.
rtol: relative tolerance for allclose assertions.
atol: absolute tolerance for allclose assertions.
check_eval_on_shapes: whether to run `eval_on_shapes` on the function and
check that the result shapes and dtypes are correct.
check_incomplete_shape: whether to check that the function can handle
incomplete shapes (including those with and without a known rank).
check_unknown_rank: (only has effect when check_incomplete_shape is True)
whether to check that the function can handle unknown ranks.
static_argnums: indices of arguments to be treated as static arguments for
`jit` and `eval_on_shapes`.
check_experimental_compile: whether to check compilation with
experimental_compile=True (in addition to compilation without the flag).
check_xla_forced_compile: whether to check compilation with
forced_compile=True (in addition to compilation without the flag). This
flag is different from experimental_compile because it enforces
whole-function compilation while the latter doesn't. TPU requires
whole-function compilation.
"""
args = args_maker()
for x in args:
if not hasattr(x, 'dtype'):
        # If there is an input that doesn't have dtype info, jit and
# eval_on_shapes may pick a different dtype for it than numpy, so we
# skip the dtype check.
check_dtypes = False
python_ans = fun(*args)
python_shapes = tf.nest.map_structure(lambda x: onp.shape(x), python_ans)
onp_shapes = tf.nest.map_structure(lambda x: onp.shape(onp.asarray(x)),
python_ans)
self.assertEqual(python_shapes, onp_shapes)
def check_compile(**kwargs):
# `wrapped_fun` and `python_should_be_executing` are used to check that
# when the jitted function is called the second time, the original Python
# function won't be executed.
def wrapped_fun(*args):
self.assertTrue(python_should_be_executing)
return fun(*args)
cfun = npe.jit(wrapped_fun, static_argnums=static_argnums, **kwargs)
python_should_be_executing = True
monitored_ans = cfun(*args)
python_should_be_executing = False
compiled_ans = cfun(*args)
self.assertAllClose(python_ans, monitored_ans, check_dtypes, atol, rtol)
self.assertAllClose(python_ans, compiled_ans, check_dtypes, atol, rtol)
# Run `cfun` with a different set of arguments to check that changing
# arguments won't cause recompilation.
new_args = args_maker()
skip_retracing_test = False
for old, new in zip(tf.nest.flatten(args), tf.nest.flatten(new_args)):
if npe.most_precise_int_dtype(old) != npe.most_precise_int_dtype(new):
# If the old and new arguments result in different dtypes (because
# they fall into different value ranges), tf-numpy will retrace, so we
# skip the no-retrace test.
skip_retracing_test = True
if not skip_retracing_test:
python_should_be_executing = True
new_python_ans = fun(*new_args)
python_should_be_executing = False
compiled_ans = cfun(*new_args)
self.assertAllClose(new_python_ans, compiled_ans, check_dtypes, atol,
rtol)
check_compile()
if check_experimental_compile:
check_compile(experimental_compile=True)
if check_xla_forced_compile:
check_compile(xla_forced_compile=True)
if check_eval_on_shapes:
# Check that npe.eval_on_shapes can get complete output shapes given
# complete input shapes.
cfun = npe.eval_on_shapes(fun, static_argnums=static_argnums)
compiled_ans = cfun(*args)
flat_python_ans = tf.nest.flatten(python_ans)
flat_compiled_ans = tf.nest.flatten(compiled_ans)
self.assertEqual(len(flat_python_ans), len(flat_compiled_ans))
for a, b in zip(flat_python_ans, flat_compiled_ans):
if hasattr(a, 'shape'):
self.assertEqual(a.shape, b.shape)
if check_dtypes and hasattr(a, 'dtype'):
self.assertEqual(tf.as_dtype(a.dtype), b.dtype)
# If some argument doesn't have a `dtype` attr (e.g. a Python scalar), we
# skip incomplete-shape checks, since shape specs need dtype. It's OK to
# skip since the same incomplete-shape checks will run for []-shaped arrays.
if check_incomplete_shape and all(hasattr(x, 'dtype') for x in args):
# Check partial shapes with known ranks.
# Numpy scalars (created by e.g. np.int32(5)) have `dtype` but not
# `shape`.
if all(hasattr(x, 'shape') for x in args):
specs = [tf.TensorSpec([None] * len(x.shape), x.dtype) for x in args]
cfun = npe.jit(
fun, static_argnums=static_argnums, input_signature=specs)
compiled_ans = cfun(*args)
self.assertAllClose(python_ans, compiled_ans, check_dtypes, atol, rtol)
if check_unknown_rank:
# Check unknown ranks.
specs = [tf.TensorSpec(None, x.dtype) for x in args]
cfun = npe.jit(
fun, static_argnums=static_argnums, input_signature=specs)
compiled_ans = cfun(*args)
self.assertAllClose(python_ans, compiled_ans, check_dtypes, atol, rtol)
def check_grads(self, f, args, atol=None, rtol=None, delta=None):
"""Check gradients against finite differences.
Args:
f: function to check at ``f(*args)``.
args: a list or tuple of argument values.
atol: absolute tolerance for gradient equality.
rtol: relative tolerance for gradient equality.
delta: step size used for finite differences.
"""
if delta is None:
# Optimal stepsize for central difference is O(epsilon^{1/3}).
dtype = tf_np.result_type(*args)
epsilon = onp.finfo(dtype).eps
delta = epsilon ** (1.0 / 3.0)
theoretical, numerical = tf.test.compute_gradient(
to_tf_fn(f), args, delta=delta)
self.assertAllClose(theoretical, numerical, check_dtypes=False, atol=atol,
rtol=rtol)
@contextmanager
def ignore_warning(**kw):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", **kw)
yield
def disable(_):
def wrapper(self, *args, **kwargs):
self.skipTest('Test is disabled')
return wrapper
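# Illustrative sketch (not part of the original module): a minimal TestCase
# using _CheckAgainstNumpy, assuming tf_np.sum mirrors onp.sum.
class _ExampleSumTest(TestCase):
  def testSumMatchesNumpy(self):
    args_maker = lambda: [rand_default()((3, 4), onp.float32)]
    self._CheckAgainstNumpy(onp.sum, tf_np.sum, args_maker, tol=1e-5)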
|
|
"""
This module contains essential stuff that should've come with Python itself ;)
"""
import os
import re
import inspect
import weakref
import errno
import six
from functools import partial, wraps
from scrapy.utils.decorators import deprecated
def flatten(x):
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
    >>> flatten([1, 2, [3,4], (5,6)])
    [1, 2, 3, 4, 5, 6]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
>>> flatten(["foo", "bar"])
['foo', 'bar']
>>> flatten(["foo", ["baz", 42], "bar"])
['foo', 'baz', 42, 'bar']
"""
return list(iflatten(x))
def iflatten(x):
"""iflatten(sequence) -> iterator
Similar to ``.flatten()``, but returns iterator instead"""
for el in x:
if is_listlike(el):
for el_ in iflatten(el):
yield el_
else:
yield el
def is_listlike(x):
"""
>>> is_listlike("foo")
False
>>> is_listlike(5)
False
>>> is_listlike(b"foo")
False
>>> is_listlike([b"foo"])
True
>>> is_listlike((b"foo",))
True
>>> is_listlike({})
True
>>> is_listlike(set())
True
>>> is_listlike((x for x in range(3)))
True
>>> is_listlike(six.moves.xrange(5))
True
"""
return hasattr(x, "__iter__") and not isinstance(x, (six.text_type, bytes))
def unique(list_, key=lambda x: x):
"""efficient function to uniquify a list preserving item order"""
seen = set()
result = []
for item in list_:
seenkey = key(item)
if seenkey in seen:
continue
seen.add(seenkey)
result.append(item)
return result
@deprecated("scrapy.utils.python.to_unicode")
def str_to_unicode(text, encoding=None, errors='strict'):
""" This function is deprecated.
Please use scrapy.utils.python.to_unicode. """
return to_unicode(text, encoding, errors)
@deprecated("scrapy.utils.python.to_bytes")
def unicode_to_str(text, encoding=None, errors='strict'):
""" This function is deprecated. Please use scrapy.utils.python.to_bytes """
return to_bytes(text, encoding, errors)
def to_unicode(text, encoding=None, errors='strict'):
"""Return the unicode representation of a bytes object `text`. If `text`
    is already a unicode object, return it as-is."""
if isinstance(text, six.text_type):
return text
if not isinstance(text, (bytes, six.text_type)):
raise TypeError('to_unicode must receive a bytes, str or unicode '
'object, got %s' % type(text).__name__)
if encoding is None:
encoding = 'utf-8'
return text.decode(encoding, errors)
def to_bytes(text, encoding=None, errors='strict'):
"""Return the binary representation of `text`. If `text`
is already a bytes object, return it as-is."""
if isinstance(text, bytes):
return text
if not isinstance(text, six.string_types):
raise TypeError('to_bytes must receive a unicode, str or bytes '
'object, got %s' % type(text).__name__)
if encoding is None:
encoding = 'utf-8'
return text.encode(encoding, errors)
def to_native_str(text, encoding=None, errors='strict'):
""" Return str representation of `text`
(bytes in Python 2.x and unicode in Python 3.x). """
if six.PY2:
return to_bytes(text, encoding, errors)
else:
return to_unicode(text, encoding, errors)
def re_rsearch(pattern, text, chunk_size=1024):
"""
This function does a reverse search in a text using a regular expression
given in the attribute 'pattern'.
    Since the re module does not provide this functionality, we have to search for
    the expression in chunks of text extracted from the end (for the sake of efficiency).
    At first, a chunk of 'chunk_size' kilobytes is extracted from the end and searched for
    the pattern. If the pattern is not found, another chunk is extracted, and another
    search is performed.
    This process continues until a match is found, or until the whole text has been read.
    If the pattern is not found, None is returned; otherwise, a tuple is returned containing
    the start and end positions of the match relative to the entire text.
"""
def _chunk_iter():
offset = len(text)
while True:
offset -= (chunk_size * 1024)
if offset <= 0:
break
yield (text[offset:], offset)
yield (text, 0)
if isinstance(pattern, six.string_types):
pattern = re.compile(pattern)
for chunk, offset in _chunk_iter():
matches = [match for match in pattern.finditer(chunk)]
if matches:
start, end = matches[-1].span()
return offset + start, offset + end
return None
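def _example_re_rsearch():
    # Illustrative sketch (not part of the original module): locate the last
    # occurrence of a closing tag; the returned tuple holds the (start, end)
    # positions relative to the whole text.
    text = "<item>a</item><item>b</item>trailing"
    assert re_rsearch(r"</item>", text) == (21, 28)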
def memoizemethod_noargs(method):
"""Decorator to cache the result of a method (without arguments) using a
weak reference to its object
"""
cache = weakref.WeakKeyDictionary()
@wraps(method)
def new_method(self, *args, **kwargs):
if self not in cache:
cache[self] = method(self, *args, **kwargs)
return cache[self]
return new_method
_BINARYCHARS = {six.b(chr(i)) for i in range(32)} - {b"\0", b"\t", b"\n", b"\r"}
_BINARYCHARS |= {ord(ch) for ch in _BINARYCHARS}
@deprecated("scrapy.utils.python.binary_is_text")
def isbinarytext(text):
""" This function is deprecated.
Please use scrapy.utils.python.binary_is_text, which was created to be more
    clear about the function's behavior: it returns the inverse of this one. """
return not binary_is_text(text)
def binary_is_text(data):
""" Returns `True` if the given ``data`` argument (a ``bytes`` object)
does not contain unprintable control characters.
"""
if not isinstance(data, bytes):
raise TypeError("data must be bytes, got '%s'" % type(data).__name__)
return all(c not in _BINARYCHARS for c in data)
def get_func_args(func, stripself=False):
"""Return the argument name list of a callable"""
if inspect.isfunction(func):
func_args, _, _, _ = inspect.getargspec(func)
elif inspect.isclass(func):
return get_func_args(func.__init__, True)
elif inspect.ismethod(func):
return get_func_args(func.__func__, True)
elif inspect.ismethoddescriptor(func):
return []
elif isinstance(func, partial):
return [x for x in get_func_args(func.func)[len(func.args):]
if not (func.keywords and x in func.keywords)]
elif hasattr(func, '__call__'):
if inspect.isroutine(func):
return []
elif getattr(func, '__name__', None) == '__call__':
return []
else:
return get_func_args(func.__call__, True)
else:
raise TypeError('%s is not callable' % type(func))
if stripself:
func_args.pop(0)
return func_args
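def _example_get_func_args():
    # Illustrative sketch (not part of the original module): argument names of
    # a plain function and of a functools.partial wrapper.
    def f(a, b, c=0):
        return a + b + c
    assert get_func_args(f) == ['a', 'b', 'c']
    assert get_func_args(partial(f, 1)) == ['b', 'c']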
def get_spec(func):
"""Returns (args, kwargs) tuple for a function
>>> import re
>>> get_spec(re.match)
(['pattern', 'string'], {'flags': 0})
>>> class Test(object):
... def __call__(self, val):
... pass
... def method(self, val, flags=0):
... pass
>>> get_spec(Test)
(['self', 'val'], {})
>>> get_spec(Test.method)
(['self', 'val'], {'flags': 0})
>>> get_spec(Test().method)
(['self', 'val'], {'flags': 0})
"""
if inspect.isfunction(func) or inspect.ismethod(func):
spec = inspect.getargspec(func)
elif hasattr(func, '__call__'):
spec = inspect.getargspec(func.__call__)
else:
raise TypeError('%s is not callable' % type(func))
defaults = spec.defaults or []
firstdefault = len(spec.args) - len(defaults)
args = spec.args[:firstdefault]
kwargs = dict(zip(spec.args[firstdefault:], defaults))
return args, kwargs
def equal_attributes(obj1, obj2, attributes):
"""Compare two objects attributes"""
# not attributes given return False by default
if not attributes:
return False
temp1, temp2 = object(), object()
for attr in attributes:
# support callables like itemgetter
if callable(attr):
if attr(obj1) != attr(obj2):
return False
elif getattr(obj1, attr, temp1) != getattr(obj2, attr, temp2):
return False
# all attributes equal
return True
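def _example_equal_attributes():
    # Illustrative sketch (not part of the original module): attributes may be
    # plain attribute names or callables extracting a value from each object.
    class _Obj(object):
        def __init__(self, url, depth):
            self.url = url
            self.depth = depth
    a = _Obj("http://example.com", 1)
    b = _Obj("http://example.com", 2)
    assert equal_attributes(a, b, ["url"])
    assert not equal_attributes(a, b, ["url", lambda o: o.depth])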
class WeakKeyCache(object):
def __init__(self, default_factory):
self.default_factory = default_factory
self._weakdict = weakref.WeakKeyDictionary()
def __getitem__(self, key):
if key not in self._weakdict:
self._weakdict[key] = self.default_factory(key)
return self._weakdict[key]
@deprecated
def stringify_dict(dct_or_tuples, encoding='utf-8', keys_only=True):
"""Return a (new) dict with unicode keys (and values when "keys_only" is
False) of the given dict converted to strings. `dct_or_tuples` can be a
dict or a list of tuples, like any dict constructor supports.
"""
d = {}
for k, v in six.iteritems(dict(dct_or_tuples)):
k = k.encode(encoding) if isinstance(k, six.text_type) else k
if not keys_only:
v = v.encode(encoding) if isinstance(v, six.text_type) else v
d[k] = v
return d
@deprecated
def is_writable(path):
"""Return True if the given path can be written (if it exists) or created
(if it doesn't exist)
"""
if os.path.exists(path):
return os.access(path, os.W_OK)
else:
return os.access(os.path.dirname(path), os.W_OK)
@deprecated
def setattr_default(obj, name, value):
"""Set attribute value, but only if it's not already set. Similar to
setdefault() for dicts.
"""
if not hasattr(obj, name):
setattr(obj, name, value)
def retry_on_eintr(function, *args, **kw):
"""Run a function and retry it while getting EINTR errors"""
while True:
try:
return function(*args, **kw)
except IOError as e:
if e.errno != errno.EINTR:
raise
def without_none_values(iterable):
"""Return a copy of `iterable` with all `None` entries removed.
If `iterable` is a mapping, return a dictionary where all pairs that have
value `None` have been removed.
"""
try:
return {k: v for k, v in six.iteritems(iterable) if v is not None}
except AttributeError:
return type(iterable)((v for v in iterable if v is not None))
def global_object_name(obj):
"""
Return full name of a global object.
>>> from scrapy import Request
>>> global_object_name(Request)
'scrapy.http.request.Request'
"""
return "%s.%s" % (obj.__module__, obj.__name__)
|
|
"""A module containing analytical objects specific to a particular experiment.
"""
import os
import numpy as np
import pandas as pd
from whaler.analysis import Analysis
class Reactions():
"""
"""
def __init__(self):
self.A = Analysis()
# Analysis output filenames.
self.crude_N2_out = "crudeN2_Es.csv"
self.N2_act_out = "N2_act_Es.csv"
self.N2_bond_out = "N2_act_bonds.csv"
# Physical constants.
        self.kB = 3.1668114/1000000    # Boltzmann constant in Hartree/K.
        self.temp = 298.15             # Temperature in K.
        self.kcal_eH = 627.509         # Hartree-to-kcal/mol conversion factor.
def write_crude_N2(self):
"""
"""
self.A.write_data(
"cruderxn", self.crude_N2_out, self.crude_N2_act(), format='%.1f')
def write_N2_act(self):
"""
"""
self.A.write_data(
"N2act", self.N2_act_out, self.therm_N2_act(), format='%.1f')
def write_N2_bonds(self):
"""
"""
self.A.write_data(
"bonds", self.N2_bond_out, self.MMN2_bonds(), format='%.3f')
def MMN2_bonds(self):
"""Tabulates the M-M, M-N, and N-N bond lengths in M2(L)4, M2(L)4N, and
M2(L)4N2 structures.
"""
# Generate structure sets.
short_gEs = self.A.gEs.dropna(axis=0, how='all')
base_structs = {
struct : short_gEs.loc[struct, 'Ground State']
for struct in short_gEs.index if struct[-1] == '4'
}
N_structs = {
struct : short_gEs.loc[struct, 'Ground State']
for struct in short_gEs.index if struct[-2:] == '4N'
}
N2_structs = {
struct : short_gEs.loc[struct, 'Ground State']
for struct in short_gEs.index if struct[-3:] == '4N2'
}
# Acquire bond lengths.
gs_M_M = {
struct : self.A.bondlength(struct, state, 'M', 'M', 'z')
for struct,state in base_structs.items()
}
es_M_M = {
struct : self.A.bondlength(struct,
self.A.spinflip[state], 'M', 'M', 'z')
for struct,state in base_structs.items()
}
gs_M_MN = {
struct[:-1] : self.A.bondlength(struct, state, 'M', 'M', 'z')
for struct,state in N_structs.items()
}
gs_M_MN2 = {
struct[:-2] : self.A.bondlength(struct, state, 'M', 'M', 'z')
for struct,state in N2_structs.items()
}
gs_M2_N = {
struct[:-1] : self.A.bondlength(struct, state, 'M', 'N', 'z')
for struct,state in N_structs.items()
}
gs_M2_N2 = {
struct[:-2] : self.A.bondlength(struct, state, 'M', 'N', 'z', 1)
for struct,state in N2_structs.items()
}
gs_M2N_N = {
struct[:-2] : self.A.bondlength(struct, state, 'N', 'N', 'z')
for struct,state in N2_structs.items()
}
# Construct the data table.
headers = [
'M-M gs', 'M-M es', 'M-MN2', 'M2-N2', 'M2N-N', 'M-MN', 'M2-N']
results = [
gs_M_M, es_M_M, gs_M_MN2, gs_M2_N2, gs_M2N_N, gs_M_MN, gs_M2_N]
resultsdict = {k:v for k,v in zip(headers, results)}
lengths = pd.DataFrame.from_dict(data=resultsdict, orient='columns')
lengths = lengths[headers]
print(lengths)
return lengths
def crude_N2_act(self):
"""Subtracts the crude (geo) energy of each M2(L)4 structure and N2 from
the corresponding M2(L)4N and M2(L)4N2 structures, tabulating the
results in kcal/mol.
"""
# Make a dictionary of all structures with ground state energies.
short_gEs = self.A.gEs.dropna(axis=0, how='all')
struct_Es = {
struct : short_gEs.loc[struct][:-1].min()
for struct in short_gEs.index}
# Calculate the energy differences.
structs = []
nitride = []
nitrogen = []
N2_E = self.A.finalE("N2_4Sgeo.log", os.path.join(self.A.loc, "N2"))
for k,v in struct_Es.items():
structs.append(k)
try:
nitride.append(struct_Es[k + 'N']*2 - v*2 - N2_E)
            except KeyError:
nitride.append(np.nan)
try:
nitrogen.append(struct_Es[k + 'N2'] - v - N2_E)
            except KeyError:
nitrogen.append(np.nan)
# Tabulate the data.
headers = ['Add N2', 'Add N']
results = np.array([nitrogen, nitride]).T
rxn_Es = pd.DataFrame(data=results, index=structs, columns=headers)
rxn_Es = rxn_Es.dropna(axis=0, how='all')
print(rxn_Es.sort_values('Add N')*self.kcal_eH)
return rxn_Es*self.kcal_eH
def therm_N2_act(self):
"""Subtracts the thermodynamically-corrected energy of each M2(L)4
structure and N2 from the corresponding M2(L)4N and M2(L)4N2 structures, tabulating the results in kcal/mol.
"""
# Calculate G for all of the structures.
therm = self.A.therm_Es.dropna(axis=0, how='all')
therm['Symm #'] = [self.symm(struct) for struct in therm.index]
# S (rot) = kB*T(ln(qrot/sn)+N), N = 1, 1.5
therm['S*T (rot)'] = (
self.kB * self.temp *
(np.log(therm['qrot']/therm['Symm #']) + therm['rot #'])
)
therm['S*T (tot)'] = (
therm['S*T (el)'] + therm['S*T (vib)'] + therm['S*T (trans)']
+ therm['S*T (rot)']
)
# G = H - T*S
therm['G'] = therm['H'] - therm['S*T (tot)']
# Calculate the energy differences.
structs = []
nitride = []
nitrogen = []
N2_G = therm.loc['N2','G']
for base in therm.index:
structs.append(base)
base_G = therm.loc[base, 'G']
try:
nitride.append(therm.loc[base + 'N', 'G']*2 - base_G*2 - N2_G)
except KeyError:
nitride.append(np.nan)
try:
nitrogen.append(therm.loc[base + 'N2', 'G'] - base_G - N2_G)
except KeyError:
nitrogen.append(np.nan)
# Tabulate the data.
headers = ['Add N2', 'Add N']
results = np.array([nitrogen, nitride]).T
rxn_Es = pd.DataFrame(data=results, index=structs, columns=headers)
rxn_Es = rxn_Es.dropna(axis=0, how='all')
print(rxn_Es.sort_values('Add N')*self.kcal_eH)
return rxn_Es*self.kcal_eH
def symm(self, structure):
"""Gives the symmetry numbers for N2, M2(L)4, M2(L)4N, and M2(L)4N2.
"""
sn = 1
if 'N2' in structure:
sn = sn*2
if 'OO4' in structure:
sn = sn*4*2*3*3*3*3
if '2N' in structure:
sn = sn*4*3*3*3*3
return sn
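# Illustrative usage sketch (not part of the original module); assumes the
# whaler Analysis inputs are present in the expected locations.
if __name__ == '__main__':
    rxns = Reactions()
    rxns.write_N2_bonds()   # -> N2_act_bonds.csv
    rxns.write_crude_N2()   # -> crudeN2_Es.csv
    rxns.write_N2_act()     # -> N2_act_Es.csv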
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2015, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""Set based operations for IP addresses and subnets."""
import itertools as _itertools
from netaddr.ip import IPNetwork, IPAddress, IPRange, cidr_merge, \
cidr_exclude, iprange_to_cidrs
from netaddr.compat import _sys_maxint, _dict_keys, _int_type
def _subtract(supernet, subnets, subnet_idx, ranges):
"""Calculate IPSet([supernet]) - IPSet(subnets).
Assumptions: subnets is sorted, subnet_idx points to the first
element in subnets that is a subnet of supernet.
    Results are appended to the ranges parameter as tuples in the format
    (version, first, last). Return value is the first subnet_idx that
    does not point to a subnet of supernet (or len(subnets) if all
    subsequent items are subnets of supernet).
"""
version = supernet._module.version
subnet = subnets[subnet_idx]
if subnet.first > supernet.first:
ranges.append((version, supernet.first, subnet.first - 1))
subnet_idx += 1
prev_subnet = subnet
while subnet_idx < len(subnets):
cur_subnet = subnets[subnet_idx]
if cur_subnet not in supernet:
break
if prev_subnet.last + 1 == cur_subnet.first:
# two adjacent, non-mergable IPNetworks
pass
else:
ranges.append((version, prev_subnet.last + 1, cur_subnet.first - 1))
subnet_idx += 1
prev_subnet = cur_subnet
first = prev_subnet.last + 1
last = supernet.last
if first <= last:
ranges.append((version, first, last))
return subnet_idx
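def _example_subtract():
    # Illustrative sketch (not part of the original module): subtracting two
    # /28 subnets from a /24 leaves three gaps, reported as
    # (version, first, last) integer ranges.
    supernet = IPNetwork('192.0.2.0/24')
    subnets = sorted([IPNetwork('192.0.2.16/28'), IPNetwork('192.0.2.64/28')])
    ranges = []
    next_idx = _subtract(supernet, subnets, 0, ranges)
    assert next_idx == len(subnets)
    assert len(ranges) == 3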
def _iter_merged_ranges(sorted_ranges):
"""Iterate over sorted_ranges, merging where possible
Sorted ranges must be a sorted iterable of (version, first, last) tuples.
    Merging occurs for pairs like [(4, 10, 42), (4, 43, 100)], which are merged
    into (4, 10, 100) and lead to the yielded value
( IPAddress(10, 4), IPAddress(100, 4) ), which is suitable input for the
iprange_to_cidrs function.
"""
if not sorted_ranges:
return
current_version, current_start, current_stop = sorted_ranges[0]
for next_version, next_start, next_stop in sorted_ranges[1:]:
if next_start == current_stop + 1 and next_version == current_version:
# Can be merged.
current_stop = next_stop
continue
# Cannot be merged.
yield (IPAddress(current_start, current_version),
IPAddress(current_stop, current_version))
current_start = next_start
current_stop = next_stop
current_version = next_version
yield (IPAddress(current_start, current_version),
IPAddress(current_stop, current_version))
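def _example_iter_merged_ranges():
    # Illustrative sketch (not part of the original module): two contiguous
    # IPv4 integer ranges merge into a single (first, last) IPAddress pair,
    # ready to be handed to iprange_to_cidrs().
    merged = list(_iter_merged_ranges([(4, 10, 42), (4, 43, 100)]))
    assert merged == [(IPAddress(10, 4), IPAddress(100, 4))]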
class IPSet(object):
"""
Represents an unordered collection (set) of unique IP addresses and
subnets.
"""
__slots__ = ('_cidrs',)
def __init__(self, iterable=None, flags=0):
"""
Constructor.
:param iterable: (optional) an iterable containing IP addresses and
subnets.
:param flags: decides which rules are applied to the interpretation
of the addr value. See the netaddr.core namespace documentation
for supported constant values.
"""
if isinstance(iterable, IPNetwork):
self._cidrs = {IPNetwork(iterable): True}
elif isinstance(iterable, IPRange):
self._cidrs = dict.fromkeys(
iprange_to_cidrs(iterable[0], iterable[-1]), True)
elif isinstance(iterable, IPSet):
self._cidrs = dict.fromkeys(iterable.iter_cidrs(), True)
else:
self._cidrs = {}
if iterable is not None:
mergeable = []
for addr in iterable:
if isinstance(addr, _int_type):
addr = IPAddress(addr, flags=flags)
mergeable.append(addr)
for cidr in cidr_merge(mergeable):
self._cidrs[cidr] = True
def __getstate__(self):
""":return: Pickled state of an ``IPSet`` object."""
return tuple([cidr.__getstate__() for cidr in self._cidrs])
def __setstate__(self, state):
"""
:param state: data used to unpickle a pickled ``IPSet`` object.
"""
self._cidrs = dict.fromkeys(
(IPNetwork((value, prefixlen), version=version)
for value, prefixlen, version in state),
True)
def _compact_single_network(self, added_network):
"""
Same as compact(), but assume that added_network is the only change and
that this IPSet was properly compacted before added_network was added.
        This allows compaction to be performed much faster. added_network must
already be present in self._cidrs.
"""
added_first = added_network.first
added_last = added_network.last
added_version = added_network.version
# Check for supernets and subnets of added_network.
if added_network._prefixlen == added_network._module.width:
# This is a single IP address, i.e. /32 for IPv4 or /128 for IPv6.
# It does not have any subnets, so we only need to check for its
# potential supernets.
for potential_supernet in added_network.supernet():
if potential_supernet in self._cidrs:
del self._cidrs[added_network]
return
else:
# IPNetworks from self._cidrs that are subnets of added_network.
to_remove = []
for cidr in self._cidrs:
if (cidr._module.version != added_version or cidr == added_network):
# We found added_network or some network of a different version.
continue
first = cidr.first
last = cidr.last
if first >= added_first and last <= added_last:
# cidr is a subnet of added_network. Remember to remove it.
to_remove.append(cidr)
elif first <= added_first and last >= added_last:
# cidr is a supernet of added_network. Remove added_network.
del self._cidrs[added_network]
# This IPSet was properly compacted before. Since added_network
# is removed now, it must again be properly compacted -> done.
assert(not to_remove)
return
for item in to_remove:
del self._cidrs[item]
# Check if added_network can be merged with another network.
# Note that merging can only happen between networks of the same
# prefixlen. This just leaves 2 candidates: The IPNetworks just before
# and just after the added_network.
# This can be reduced to 1 candidate: 10.0.0.0/24 and 10.0.1.0/24 can
        # be merged into 10.0.0.0/23. But 10.0.1.0/24 and 10.0.2.0/24
# cannot be merged. With only 1 candidate, we might as well make a
# dictionary lookup.
shift_width = added_network._module.width - added_network.prefixlen
while added_network.prefixlen != 0:
# figure out if the least significant bit of the network part is 0 or 1.
the_bit = (added_network._value >> shift_width) & 1
if the_bit:
candidate = added_network.previous()
else:
candidate = added_network.next()
if candidate not in self._cidrs:
# The only possible merge does not work -> merge done
return
# Remove added_network&candidate, add merged network.
del self._cidrs[candidate]
del self._cidrs[added_network]
added_network.prefixlen -= 1
# Be sure that we set the host bits to 0 when we move the prefixlen.
# Otherwise, adding 255.255.255.255/32 will result in a merged
# 255.255.255.255/24 network, but we want 255.255.255.0/24.
shift_width += 1
added_network._value = (added_network._value >> shift_width) << shift_width
self._cidrs[added_network] = True
def compact(self):
"""
Compact internal list of `IPNetwork` objects using a CIDR merge.
"""
cidrs = cidr_merge(self._cidrs)
self._cidrs = dict.fromkeys(cidrs, True)
def __hash__(self):
"""
Raises ``TypeError`` if this method is called.
.. note:: IPSet objects are not hashable and cannot be used as \
dictionary keys or as members of other sets. \
"""
raise TypeError('IP sets are unhashable!')
def __contains__(self, ip):
"""
:param ip: An IP address or subnet.
:return: ``True`` if IP address or subnet is a member of this IP set.
"""
ip = IPNetwork(ip)
# Iterating over self._cidrs is an O(n) operation: 1000 items in
# self._cidrs would mean 1000 loops. Iterating over all possible
# supernets loops at most 32 times for IPv4 or 128 times for IPv6,
# no matter how many CIDRs this object contains.
if ip in self._cidrs:
return True
for cidr in ip.supernet():
if cidr in self._cidrs:
return True
return False
def __nonzero__(self):
"""Return True if IPSet contains at least one IP, else False"""
return bool(self._cidrs)
__bool__ = __nonzero__ # Python 3.x.
def __iter__(self):
"""
:return: an iterator over the IP addresses within this IP set.
"""
return _itertools.chain(*sorted(self._cidrs))
def iter_cidrs(self):
"""
:return: an iterator over individual IP subnets within this IP set.
"""
return sorted(self._cidrs)
def add(self, addr, flags=0):
"""
Adds an IP address or subnet or IPRange to this IP set. Has no effect if
it is already present.
Note that where possible the IP address or subnet is merged with other
members of the set to form more concise CIDR blocks.
:param addr: An IP address or subnet in either string or object form, or
an IPRange object.
:param flags: decides which rules are applied to the interpretation
of the addr value. See the netaddr.core namespace documentation
for supported constant values.
"""
if isinstance(addr, IPRange):
new_cidrs = dict.fromkeys(
iprange_to_cidrs(addr[0], addr[-1]), True)
self._cidrs.update(new_cidrs)
self.compact()
return
if isinstance(addr, _int_type):
addr = IPNetwork(IPAddress(addr, flags=flags))
else:
addr = IPNetwork(addr)
self._cidrs[addr] = True
self._compact_single_network(addr)
def remove(self, addr, flags=0):
"""
Removes an IP address or subnet or IPRange from this IP set. Does
nothing if it is not already a member.
Note that this method behaves more like discard() found in regular
Python sets because it doesn't raise KeyError exceptions if the
        IP address or subnet in question does not exist. It doesn't make sense
to fully emulate that behaviour here as IP sets contain groups of
individual IP addresses as individual set members using IPNetwork
objects.
:param addr: An IP address or subnet, or an IPRange.
:param flags: decides which rules are applied to the interpretation
of the addr value. See the netaddr.core namespace documentation
for supported constant values.
"""
if isinstance(addr, IPRange):
cidrs = iprange_to_cidrs(addr[0], addr[-1])
for cidr in cidrs:
self.remove(cidr)
return
if isinstance(addr, _int_type):
addr = IPAddress(addr, flags=flags)
else:
addr = IPNetwork(addr)
# This add() is required for address blocks provided that are larger
# than blocks found within the set but have overlaps. e.g. :-
#
# >>> IPSet(['192.0.2.0/24']).remove('192.0.2.0/23')
# IPSet([])
#
self.add(addr)
remainder = None
matching_cidr = None
# Search for a matching CIDR and exclude IP from it.
for cidr in self._cidrs:
if addr in cidr:
remainder = cidr_exclude(cidr, addr)
matching_cidr = cidr
break
# Replace matching CIDR with remaining CIDR elements.
if remainder is not None:
del self._cidrs[matching_cidr]
for cidr in remainder:
self._cidrs[cidr] = True
# No call to self.compact() is needed. Removing an IPNetwork cannot
        # create mergeable networks.
def pop(self):
"""
Removes and returns an arbitrary IP address or subnet from this IP
set.
:return: An IP address or subnet.
"""
return self._cidrs.popitem()[0]
def isdisjoint(self, other):
"""
:param other: an IP set.
:return: ``True`` if this IP set has no elements (IP addresses
or subnets) in common with other. Intersection *must* be an
empty set.
"""
result = self.intersection(other)
return not result
def copy(self):
""":return: a shallow copy of this IP set."""
obj_copy = self.__class__()
obj_copy._cidrs.update(self._cidrs)
return obj_copy
def update(self, iterable, flags=0):
"""
        Update the contents of this IP set with the union of itself and
        another IP set.
:param iterable: an iterable containing IP addresses and subnets.
:param flags: decides which rules are applied to the interpretation
of the addr value. See the netaddr.core namespace documentation
for supported constant values.
"""
if isinstance(iterable, IPSet):
self._cidrs = dict.fromkeys(
(ip for ip in cidr_merge(_dict_keys(self._cidrs)
+ _dict_keys(iterable._cidrs))), True)
return
elif isinstance(iterable, (IPNetwork, IPRange)):
self.add(iterable)
return
if not hasattr(iterable, '__iter__'):
raise TypeError('an iterable was expected!')
# An iterable containing IP addresses or subnets.
mergeable = []
for addr in iterable:
if isinstance(addr, _int_type):
addr = IPAddress(addr, flags=flags)
mergeable.append(addr)
for cidr in cidr_merge(_dict_keys(self._cidrs) + mergeable):
self._cidrs[cidr] = True
self.compact()
def clear(self):
"""Remove all IP addresses and subnets from this IP set."""
self._cidrs = {}
def __eq__(self, other):
"""
:param other: an IP set
:return: ``True`` if this IP set is equivalent to the ``other`` IP set,
``False`` otherwise.
"""
try:
return self._cidrs == other._cidrs
except AttributeError:
return NotImplemented
def __ne__(self, other):
"""
:param other: an IP set
:return: ``False`` if this IP set is equivalent to the ``other`` IP set,
``True`` otherwise.
"""
try:
return self._cidrs != other._cidrs
except AttributeError:
return NotImplemented
def __lt__(self, other):
"""
:param other: an IP set
:return: ``True`` if this IP set is less than the ``other`` IP set,
``False`` otherwise.
"""
if not hasattr(other, '_cidrs'):
return NotImplemented
return self.size < other.size and self.issubset(other)
def issubset(self, other):
"""
:param other: an IP set.
:return: ``True`` if every IP address and subnet in this IP set
is found within ``other``.
"""
for cidr in self._cidrs:
if cidr not in other:
return False
return True
__le__ = issubset
def __gt__(self, other):
"""
:param other: an IP set.
:return: ``True`` if this IP set is greater than the ``other`` IP set,
``False`` otherwise.
"""
if not hasattr(other, '_cidrs'):
return NotImplemented
return self.size > other.size and self.issuperset(other)
def issuperset(self, other):
"""
:param other: an IP set.
        :return: ``True`` if every IP address and subnet in the other IP set
is found within this one.
"""
if not hasattr(other, '_cidrs'):
return NotImplemented
for cidr in other._cidrs:
if cidr not in self:
return False
return True
__ge__ = issuperset
def union(self, other):
"""
:param other: an IP set.
:return: the union of this IP set and another as a new IP set
(combines IP addresses and subnets from both sets).
"""
ip_set = self.copy()
ip_set.update(other)
return ip_set
__or__ = union
def intersection(self, other):
"""
:param other: an IP set.
:return: the intersection of this IP set and another as a new IP set.
(IP addresses and subnets common to both sets).
"""
result_cidrs = {}
own_nets = sorted(self._cidrs)
other_nets = sorted(other._cidrs)
own_idx = 0
other_idx = 0
own_len = len(own_nets)
other_len = len(other_nets)
while own_idx < own_len and other_idx < other_len:
own_cur = own_nets[own_idx]
other_cur = other_nets[other_idx]
if own_cur == other_cur:
result_cidrs[own_cur] = True
own_idx += 1
other_idx += 1
elif own_cur in other_cur:
result_cidrs[own_cur] = True
own_idx += 1
elif other_cur in own_cur:
result_cidrs[other_cur] = True
other_idx += 1
else:
# own_cur and other_cur have nothing in common
if own_cur < other_cur:
own_idx += 1
else:
other_idx += 1
# We ran out of networks in own_nets or other_nets. Either way, there
# can be no further result_cidrs.
result = IPSet()
result._cidrs = result_cidrs
return result
__and__ = intersection
def symmetric_difference(self, other):
"""
:param other: an IP set.
:return: the symmetric difference of this IP set and another as a new
IP set (all IP addresses and subnets that are in exactly one
of the sets).
"""
# In contrast to intersection() and difference(), we cannot construct
# the result_cidrs easily. Some cidrs may have to be merged, e.g. for
# IPSet(["10.0.0.0/32"]).symmetric_difference(IPSet(["10.0.0.1/32"])).
result_ranges = []
own_nets = sorted(self._cidrs)
other_nets = sorted(other._cidrs)
own_idx = 0
other_idx = 0
own_len = len(own_nets)
other_len = len(other_nets)
while own_idx < own_len and other_idx < other_len:
own_cur = own_nets[own_idx]
other_cur = other_nets[other_idx]
if own_cur == other_cur:
own_idx += 1
other_idx += 1
elif own_cur in other_cur:
own_idx = _subtract(other_cur, own_nets, own_idx, result_ranges)
other_idx += 1
elif other_cur in own_cur:
other_idx = _subtract(own_cur, other_nets, other_idx, result_ranges)
own_idx += 1
else:
# own_cur and other_cur have nothing in common
if own_cur < other_cur:
result_ranges.append( (own_cur._module.version,
own_cur.first, own_cur.last) )
own_idx += 1
else:
result_ranges.append( (other_cur._module.version,
other_cur.first, other_cur.last) )
other_idx += 1
# If the above loop terminated because it processed all cidrs of
# "other", then any remaining cidrs in self must be part of the result.
while own_idx < own_len:
own_cur = own_nets[own_idx]
result_ranges.append((own_cur._module.version,
own_cur.first, own_cur.last))
own_idx += 1
# If the above loop terminated because it processed all cidrs of
# self, then any remaining cidrs in "other" must be part of the result.
while other_idx < other_len:
other_cur = other_nets[other_idx]
result_ranges.append((other_cur._module.version,
other_cur.first, other_cur.last))
other_idx += 1
result = IPSet()
for start, stop in _iter_merged_ranges(result_ranges):
cidrs = iprange_to_cidrs(start, stop)
for cidr in cidrs:
result._cidrs[cidr] = True
return result
__xor__ = symmetric_difference
def difference(self, other):
"""
:param other: an IP set.
:return: the difference between this IP set and another as a new IP
set (all IP addresses and subnets that are in this IP set but
not found in the other.)
"""
result_ranges = []
result_cidrs = {}
own_nets = sorted(self._cidrs)
other_nets = sorted(other._cidrs)
own_idx = 0
other_idx = 0
own_len = len(own_nets)
other_len = len(other_nets)
while own_idx < own_len and other_idx < other_len:
own_cur = own_nets[own_idx]
other_cur = other_nets[other_idx]
if own_cur == other_cur:
own_idx += 1
other_idx += 1
elif own_cur in other_cur:
own_idx += 1
elif other_cur in own_cur:
other_idx = _subtract(own_cur, other_nets, other_idx,
result_ranges)
own_idx += 1
else:
# own_cur and other_cur have nothing in common
if own_cur < other_cur:
result_cidrs[own_cur] = True
own_idx += 1
else:
other_idx += 1
# If the above loop terminated because it processed all cidrs of
# "other", then any remaining cidrs in self must be part of the result.
while own_idx < own_len:
result_cidrs[own_nets[own_idx]] = True
own_idx += 1
for start, stop in _iter_merged_ranges(result_ranges):
for cidr in iprange_to_cidrs(start, stop):
result_cidrs[cidr] = True
result = IPSet()
result._cidrs = result_cidrs
return result
__sub__ = difference
def __len__(self):
"""
        :return: the cardinality of this IP set (i.e. the total number of \
            individual IP addresses). Raises ``IndexError`` if size > \
            maxint (a Python limitation). Use the .size property for \
            subnets of any size.
"""
size = self.size
if size > _sys_maxint:
raise IndexError("range contains more than %d (sys.maxint) " \
"IP addresses! Use the .size property instead." % _sys_maxint)
return size
@property
def size(self):
"""
The cardinality of this IP set (based on the number of individual IP
addresses including those implicitly defined in subnets).
"""
return sum([cidr.size for cidr in self._cidrs])
def __repr__(self):
""":return: Python statement to create an equivalent object"""
return 'IPSet(%r)' % [str(c) for c in sorted(self._cidrs)]
__str__ = __repr__
def iscontiguous(self):
"""
Returns True if the members of the set form a contiguous IP
address range (with no gaps), False otherwise.
:return: ``True`` if the ``IPSet`` object is contiguous.
"""
cidrs = self.iter_cidrs()
if len(cidrs) > 1:
previous = cidrs[0][0]
for cidr in cidrs:
if cidr[0] != previous:
return False
previous = cidr[-1] + 1
return True
def iprange(self):
"""
Generates an IPRange for this IPSet, if all its members
form a single contiguous sequence.
Raises ``ValueError`` if the set is not contiguous.
:return: An ``IPRange`` for all IPs in the IPSet.
"""
if self.iscontiguous():
cidrs = self.iter_cidrs()
if not cidrs:
return None
return IPRange(cidrs[0][0], cidrs[-1][-1])
else:
raise ValueError("IPSet is not contiguous")
def iter_ipranges(self):
"""Generate the merged IPRanges for this IPSet.
In contrast to self.iprange(), this will work even when the IPSet is
not contiguous. Adjacent IPRanges will be merged together, so you
get the minimal number of IPRanges.
"""
sorted_ranges = [(cidr._module.version, cidr.first, cidr.last) for
cidr in self.iter_cidrs()]
for start, stop in _iter_merged_ranges(sorted_ranges):
yield IPRange(start, stop)
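# A minimal usage sketch of the IPSet operations defined above; the addresses
# are arbitrary documentation ranges and the guard keeps the example from
# running on import.
if __name__ == '__main__':
    ips = IPSet(['192.0.2.0/25', '192.0.2.128/25'])   # merged to 192.0.2.0/24
    ips.add('198.51.100.0/24')
    ips.remove('192.0.2.0/26')                        # splits the /24 block
    print(ips.iscontiguous())                         # False: gaps remain
    for ip_range in ips.iter_ipranges():              # minimal merged ranges
        print(ip_range)
    print(ips & IPSet(['192.0.2.0/24']))              # intersection, new IPSet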
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from unittest import mock
import ddt
from cinder.api.contrib import volume_image_metadata as image_metadata
from cinder.api import microversions as mv
from cinder.api.v3 import volume_metadata
from cinder import db
from cinder import exception
from cinder.policies import volume_metadata as policy
from cinder.tests.unit.api import fakes as fake_api
from cinder.tests.unit.policies import base
from cinder.tests.unit.policies import test_base
from cinder.tests.unit import utils as test_utils
from cinder.volume import api as volume_api
@ddt.ddt
class VolumeMetadataPolicyTest(base.BasePolicyTest):
authorized_readers = [
'legacy_admin',
'legacy_owner',
'system_admin',
'project_admin',
'project_member',
'project_reader',
'project_foo',
]
unauthorized_readers = [
'system_member',
'system_reader',
'system_foo',
'other_project_member',
'other_project_reader',
]
authorized_members = [
'legacy_admin',
'legacy_owner',
'system_admin',
'project_admin',
'project_member',
'project_reader',
'project_foo',
]
unauthorized_members = [
'system_member',
'system_reader',
'system_foo',
'other_project_member',
'other_project_reader',
]
authorized_admins = [
'legacy_admin',
'system_admin',
'project_admin',
]
unauthorized_admins = [
'legacy_owner',
'system_member',
'system_reader',
'system_foo',
'project_member',
'project_reader',
'project_foo',
'other_project_member',
'other_project_reader',
]
# DB validations will throw VolumeNotFound for some contexts
unauthorized_exceptions = [
exception.VolumeNotFound,
]
    # The basic policy tests run without enforcing scope (which cinder
    # doesn't yet support) and with deprecated rules enabled.
def setUp(self, enforce_scope=False, enforce_new_defaults=False,
*args, **kwargs):
super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs)
self.controller = volume_metadata.Controller()
self.image_controller = image_metadata.VolumeImageMetadataController()
self.api_path = '/v3/%s/volumes' % (self.project_id)
self.api_version = mv.BASE_VERSION
def _create_volume(self, image_metadata=None, **kwargs):
vol_type = test_utils.create_volume_type(self.project_admin_context,
name='fake_vol_type',
testcase_instance=self)
volume = test_utils.create_volume(self.project_member_context,
volume_type_id=vol_type.id,
testcase_instance=self, **kwargs)
for (k, v) in (image_metadata.items() if image_metadata else []):
db.volume_glance_metadata_create(self.project_admin_context,
volume.id, k, v)
return volume
@ddt.data(*base.all_users)
def test_get_policy(self, user_id):
volume = self._create_volume()
rule_name = policy.GET_POLICY
url = '%s/%s/metadata' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url, version=self.api_version)
self.common_policy_check(user_id, self.authorized_readers,
self.unauthorized_readers,
self.unauthorized_exceptions,
rule_name, self.controller.index, req,
volume_id=volume.id)
@ddt.data(*base.all_users)
def test_create_policy(self, user_id):
volume = self._create_volume()
rule_name = policy.CREATE_POLICY
url = '%s/%s/metadata' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url, version=self.api_version)
req.method = 'POST'
body = {
"metadata": {
"name": "metadata0"
}
}
self.common_policy_check(user_id, self.authorized_members,
self.unauthorized_members,
self.unauthorized_exceptions,
rule_name, self.controller.create, req,
volume_id=volume.id, body=body)
@ddt.data(*base.all_users)
def test_update_policy(self, user_id):
volume = self._create_volume(metadata={"foo": "bar"})
rule_name = policy.UPDATE_POLICY
url = '%s/%s/metadata' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url, version=self.api_version)
req.method = 'PUT'
body = {
# Not sure why, but the API code expects the body to contain
# a "meta" (not "metadata") dict.
"meta": {
"foo": "zap"
}
}
self.common_policy_check(user_id, self.authorized_members,
self.unauthorized_members,
self.unauthorized_exceptions,
rule_name, self.controller.update, req,
volume_id=volume.id, id='foo', body=body)
@ddt.data(*base.all_users)
def test_delete_policy(self, user_id):
volume = self._create_volume(metadata={"foo": "bar"})
rule_name = policy.DELETE_POLICY
url = '%s/%s/metadata/foo' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url, version=self.api_version)
req.method = 'DELETE'
# Relax the GET_POLICY in order to get past that check.
self.policy.set_rules({policy.GET_POLICY: ""},
overwrite=False)
self.common_policy_check(user_id, self.authorized_members,
self.unauthorized_members,
self.unauthorized_exceptions,
rule_name, self.controller.delete, req,
volume_id=volume.id, id='foo')
@ddt.data(*base.all_users)
def test_image_metadata_show_policy(self, user_id):
image_metadata = {
"up": "down",
"left": "right"
}
volume = self._create_volume(image_metadata)
volume = volume.obj_to_primitive()['versioned_object.data']
rule_name = policy.IMAGE_METADATA_SHOW_POLICY
url = '%s/%s' % (self.api_path, volume['id'])
req = fake_api.HTTPRequest.blank(url, version=self.api_version)
req.get_db_volume = mock.MagicMock()
req.get_db_volume.return_value = volume
resp_obj = mock.MagicMock(obj={'volume': volume})
self.assertNotIn('volume_image_metadata', volume.keys())
self.common_policy_check(user_id, self.authorized_readers,
self.unauthorized_readers,
self.unauthorized_exceptions,
rule_name,
self.image_controller.show, req,
resp_obj, id=volume['id'], fatal=False)
if user_id in self.authorized_readers:
self.assertDictEqual(image_metadata,
volume['volume_image_metadata'])
@ddt.data(*base.all_users)
def test_image_metadata_set_policy(self, user_id):
volume = self._create_volume()
rule_name = policy.IMAGE_METADATA_SET_POLICY
url = '%s/%s/action' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url, version=self.api_version)
req.method = 'POST'
body = {
"os-set_image_metadata": {
"metadata": {
"image_name": "my_image",
}
}
}
self.common_policy_check(user_id, self.authorized_members,
self.unauthorized_members,
self.unauthorized_exceptions,
rule_name,
self.image_controller.create, req,
id=volume.id, body=body)
@ddt.data(*base.all_users)
def test_image_metadata_remove_policy(self, user_id):
volume = self._create_volume(image_metadata={"foo": "bar"})
rule_name = policy.IMAGE_METADATA_REMOVE_POLICY
url = '%s/%s/action' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url, version=self.api_version)
req.method = 'POST'
body = {
"os-unset_image_metadata": {
"key": "foo"
}
}
self.common_policy_check(user_id, self.authorized_members,
self.unauthorized_members,
self.unauthorized_exceptions,
rule_name,
self.image_controller.delete, req,
id=volume.id, body=body)
# NOTE(abishop):
# The following code is a work in progress, and work is deferred until
# Yoga. This is because the UPDATE_ADMIN_METADATA_POLICY rule is
# unchanged in Xena (it's RULE_ADMIN_API). This test will be necessary
# when RULE_ADMIN_API is deprecated in Yoga.
#
# @ddt.data(*base.all_users)
# def test_update_admin_metadata_policy(self, user_id):
# volume = self._create_volume()
# rule_name = policy.UPDATE_ADMIN_METADATA_POLICY
# url = '%s/%s/action' % (self.api_path, volume.id)
# req = fake_api.HTTPRequest.blank(url, version=self.api_version)
# req.method = 'POST'
# body = {
# "os-update_readonly_flag": {
# "readonly": True
# }
# }
#
# # Only this test needs a VolumeActionsController
# ext_mgr = extensions.ExtensionManager()
# controller = volume_actions.VolumeActionsController(ext_mgr)
#
# # Relax the UPDATE_READONLY_POLICY in order to get past that check.
# self.policy.set_rules({va_policy.UPDATE_READONLY_POLICY: ""},
# overwrite=False)
#
# self.common_policy_check(user_id, self.authorized_admins,
# self.unauthorized_admins,
# self.unauthorized_exceptions,
# rule_name,
# controller._volume_readonly_update, req,
# id=volume.id, body=body)
class VolumeMetadataPolicySecureRbacTest(VolumeMetadataPolicyTest):
authorized_readers = [
'legacy_admin',
'system_admin',
'project_admin',
'project_member',
'project_reader',
]
unauthorized_readers = [
'legacy_owner',
'system_member',
'system_reader',
'system_foo',
'project_foo',
'other_project_member',
'other_project_reader',
]
authorized_members = [
'legacy_admin',
'system_admin',
'project_admin',
'project_member',
]
unauthorized_members = [
'legacy_owner',
'system_member',
'system_reader',
'system_foo',
'project_reader',
'project_foo',
'other_project_member',
'other_project_reader',
]
def setUp(self, *args, **kwargs):
# Test secure RBAC by disabling deprecated policy rules (scope
# is still not enabled).
super().setUp(enforce_scope=False, enforce_new_defaults=True,
*args, **kwargs)
class VolumePolicyTests(test_base.CinderPolicyTests):
def test_admin_can_get_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v', res_meta['k'])
def test_owner_can_get_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(user_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v', res_meta['k'])
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_get_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(non_owner_context, path, 'GET')
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
def test_admin_can_create_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k1": "v1"}}
response = self._get_request_response(admin_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
def test_owner_can_create_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k1": "v1"}}
response = self._get_request_response(user_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_create_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k1": "v1"}}
response = self._get_request_response(non_owner_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
def test_admin_can_delete_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % {
'project_id': admin_context.project_id, 'volume_id': volume.id,
'key': 'k'
}
response = self._get_request_response(admin_context, path, 'DELETE')
self.assertEqual(HTTPStatus.OK, response.status_int)
def test_owner_can_delete_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id,
'key': 'k'
}
response = self._get_request_response(user_context, path, 'DELETE')
self.assertEqual(HTTPStatus.OK, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_delete_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % {
'project_id': non_owner_context.project_id,
'volume_id': volume.id,
'key': 'k'
}
response = self._get_request_response(non_owner_context, path,
'DELETE')
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
def test_admin_can_update_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k": "v2"}}
response = self._get_request_response(admin_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v2', res_meta['k'])
def test_owner_can_update_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k": "v2"}}
response = self._get_request_response(user_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v2', res_meta['k'])
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_update_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k": "v2"}}
response = self._get_request_response(non_owner_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
|
|
#!/usr/bin/env python3
"""Update user groups MassMessage lists."""
# Author : JJMC89
# License: MIT
import argparse
import datetime
import json
import re
from collections import OrderedDict
from contextlib import suppress
from datetime import date, time, timedelta
from itertools import chain
from operator import itemgetter
from typing import Any, Dict, List, Optional, Set, Union
import pywikibot
from pywikibot.bot import (
_GLOBAL_HELP,
ExistingPageBot,
NoRedirectPageBot,
SingleSiteBot,
)
from pywikibot.pagegenerators import PreloadingGenerator
from typing_extensions import TypedDict
PageDict = Dict[
Union[str, pywikibot.User], Union[pywikibot.Page, Set[pywikibot.Page]]
]
class GroupChange(TypedDict):
"""Group change."""
user: pywikibot.User
added: Set[str]
removed: Set[str]
timestamp: pywikibot.Timestamp
class Rename(TypedDict):
"""Rename."""
olduser: pywikibot.User
newuser: pywikibot.User
timestamp: pywikibot.Timestamp
class UserGroupsMassMessageListUpdater(
SingleSiteBot, NoRedirectPageBot, ExistingPageBot
):
"""Bot to update MassMessage lists."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize."""
self.available_options.update( # pylint: disable=no-member
{
'config': dict(),
'group_changes': list(),
'renames': [
{'olduser': None, 'newuser': None, 'timestamp': None}
],
}
)
super().__init__(**kwargs)
def check_disabled(self) -> None:
"""Check if the task is disabled. If so, quit."""
class_name = self.__class__.__name__
page = pywikibot.Page(
self.site,
f'User:{self.site.username()}/shutoff/{class_name}.json',
)
if page.exists():
content = page.get(force=True).strip()
if content:
pywikibot.error(f'{class_name} disabled:\n{content}')
self.quit()
def treat_page(self) -> None:
"""Process one page."""
self.check_disabled()
page_config = self.opt.config[self.current_page.title()]
added_count = removed_count = renamed_count = 0
page_json = json.loads(
self.current_page.text, object_pairs_hook=OrderedDict
)
page_dict = {'>nonusers': set()} # type: PageDict
# Process the current targets.
for item in page_json['targets']:
page = pywikibot.Page(self.site, item['title'])
if page.namespace().id not in (2, 3):
page_dict['>nonusers'].add(page)
continue
base_page = pywikibot.Page(
self.site, re.sub(r'^([^/]+).*', r'\1', page.title())
)
if base_page.isTalkPage():
user = pywikibot.User(base_page.toggleTalkPage())
else:
user = pywikibot.User(base_page)
# Handle renames.
for rename in self.opt.renames:
if user != rename['olduser']:
continue
newuser = rename['newuser']
newpage = pywikibot.Page(
self.site,
re.sub(
fr':{re.escape(user.title(with_ns=False))}\b',
f':{newuser.title(with_ns=False)}',
page.title(),
),
)
pywikibot.log(
f'{user.title()} renamed to {newuser.title()} '
f'({page.title()} to {newpage.title()})'
)
user = newuser
page = newpage
renamed_count += 1
if page_config.get('required', None):
if not page_config['group'] & set(user.groups()):
pywikibot.log(f'Removed {user}, not in required group')
removed_count += 1
continue
page_dict[user] = page
# Handle group changes.
for change in self.opt.group_changes:
user = change['user']
if (
page_config.get('add', None)
and (page_config['group'] & change['added'])
and 'bot' not in user.groups()
and user not in page_dict
):
pywikibot.log(f'Added {user.title()}')
page_dict[user] = user.toggleTalkPage()
added_count += 1
if page_config.get('remove', None) and (
page_config['group'] & change['removed']
):
if page_dict.pop(user, None):
pywikibot.log(f'Removed {user.title()}')
removed_count += 1
# Build JSON and save.
if added_count or removed_count or renamed_count:
new_page_json = OrderedDict()
new_page_json['description'] = page_json['description']
new_page_json['targets'] = []
for page in sorted(
page_dict.pop('>nonusers') | set(page_dict.values())
):
new_page_json['targets'].append({'title': page.title()})
text = json.dumps(new_page_json, ensure_ascii=False, indent=4)
if added_count + removed_count + renamed_count == 0:
return
summary_parts = []
if added_count > 0:
summary_parts.append(f'{added_count} added')
if removed_count > 0:
summary_parts.append(f'{removed_count} removed')
if renamed_count > 0:
summary_parts.append(f'{renamed_count} renamed')
summary = f"Update MassMessage list: {','.join(summary_parts)}"
self.put_current(text, summary=summary, minor=False)
def make_arg_parser() -> argparse.ArgumentParser:
"""Return the argument parser."""
parser = argparse.ArgumentParser(
description='Update user groups MassMessage lists',
epilog=re.sub(
r'\n\n?-help +.+?(\n\n-|\s*$)',
r'\1',
_GLOBAL_HELP,
flags=re.S,
),
formatter_class=argparse.RawDescriptionHelpFormatter,
allow_abbrev=False,
)
parser.add_argument(
'config',
help='Page title that has the JSON config (object)',
)
parser.add_argument(
'--always',
'-a',
action='store_true',
help='Do not prompt to save changes',
)
parser.add_argument(
'--meta',
action='store_true',
help=(
'metawiki will also be checked for group changes. '
'Should be specified when running on WMF wikis with CentralAuth.'
),
)
parser.add_argument(
'--rename',
action='store_true',
help='Rename logs will be parsed. If --meta, from metawiki.',
)
yesterday = date.today() - timedelta(days=1)
parser.add_argument(
'--start',
default=datetime.datetime.combine(yesterday, time.min),
type=pywikibot.Timestamp.fromISOformat,
help='Timestamp to start from',
metavar='%Y-%m-%dT%H:%M:%SZ',
)
parser.add_argument(
'--end',
default=datetime.datetime.combine(yesterday, time.max),
type=pywikibot.Timestamp.fromISOformat,
help='Timestamp to end at',
metavar='%Y-%m-%dT%H:%M:%SZ',
)
return parser
def get_json_from_page(page: pywikibot.Page) -> Dict[str, Any]:
"""
Return JSON from the page.
:param page: Page to read
"""
if not page.exists():
pywikibot.error(f'{page!r} does not exist.')
return {}
if page.isRedirectPage():
pywikibot.error(f'{page!r} is a redirect.')
return {}
try:
return json.loads(page.get().strip())
except ValueError:
pywikibot.error(f'{page!r} does not contain valid JSON.')
raise
def validate_config(
config: Dict[str, Any], site: pywikibot.site.APISite
) -> bool:
"""
Validate the configuration and return bool.
:param config: configuration to validate
:param site: site used in the validation
"""
pywikibot.log('config:')
for title, page_config in config.items():
pywikibot.log(f'-{title} = {page_config}')
page_config['page'] = pywikibot.Page(site, title)
required_keys = ['enabled', 'group', 'page']
has_keys = []
for key, value in page_config.items():
if key in required_keys:
has_keys.append(key)
if key in ('add', 'enabled', 'remove', 'required'):
if not isinstance(value, bool):
return False
elif key == 'group':
if isinstance(value, str):
page_config[key] = {value}
else:
return False
elif key == 'page':
if value.content_model != 'MassMessageListContent':
return False
else:
return False
if sorted(has_keys) != sorted(required_keys):
return False
return True
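# A minimal sketch (made-up page title and group) of one entry in the config
# dict that validate_config() accepts. On-wiki, the config is stored as JSON
# on the page named by the ``config`` argument; the 'page' key is injected
# from the title during validation, and that title must point to a page using
# the MassMessageListContent content model.
_EXAMPLE_CONFIG = {
    'Wikipedia:Example/MassMessage list': {
        'enabled': True,    # process this list
        'group': 'sysop',   # normalized to {'sysop'} by validate_config()
        'add': True,        # add users who gain the group (bots are skipped)
        'remove': True,     # remove users who lose the group
        'required': True,   # drop listed users no longer in the group
    },
}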
def get_renames(
rename_site: pywikibot.site.APISite,
logtype: str,
start: datetime.datetime,
end: datetime.datetime,
site: pywikibot.site.APISite,
) -> List[Rename]:
"""Retrun a sorted list of reenames."""
renames = []
rename_events = rename_site.logevents(
logtype=logtype, start=start, end=end, reverse=True
)
for rename in rename_events:
with suppress(KeyError):
renames.append(
Rename(
olduser=pywikibot.User(
site, rename.data['params']['olduser']
),
newuser=pywikibot.User(
site, rename.data['params']['newuser']
),
timestamp=rename.timestamp(),
)
)
return sorted(renames, key=itemgetter('timestamp'))
def get_group_changes(
site: pywikibot.site.APISite,
start: datetime.datetime,
end: datetime.datetime,
meta: Optional[pywikibot.site.APISite],
) -> List[GroupChange]:
"""Return a sorted list of group canges."""
group_changes = []
rights_events = site.logevents(
logtype='rights', start=start, end=end, reverse=True
)
if meta:
meta_rights_events = set()
for log_event in meta.logevents(
logtype='rights', start=start, end=end, reverse=True
):
try:
if log_event.page().title().endswith(site.suffix):
meta_rights_events.add(log_event)
except KeyError:
continue
rights_events = chain(rights_events, meta_rights_events)
for log_event in rights_events:
with suppress(KeyError):
new_groups = set(log_event.newgroups)
old_groups = set(log_event.oldgroups)
group_changes.append(
GroupChange(
user=pywikibot.User(
site,
re.sub(
fr'{site.suffix}$',
'',
log_event.page().title(),
),
),
added=new_groups - old_groups,
removed=old_groups - new_groups,
timestamp=log_event.timestamp(),
)
)
return sorted(group_changes, key=itemgetter('timestamp'))
def main(*args: str) -> None:
"""
Process command line arguments and invoke bot.
:param args: command line arguments
"""
local_args = pywikibot.handle_args(args, do_help=False)
site = pywikibot.Site()
site.login()
site.suffix = f'@{site.dbName()}'
parser = make_arg_parser()
options = vars(parser.parse_args(args=local_args))
config_page = pywikibot.Page(site, options.pop('config'))
config = get_json_from_page(config_page)
if not validate_config(config, site):
pywikibot.error('The specified configuration is invalid.')
return
options['config'] = config
meta = pywikibot.Site('meta', 'meta') if options.pop('meta') else None
start = options.pop('start')
end = options.pop('end')
if options.pop('rename'):
options['renames'] = get_renames(
rename_site=meta or site,
logtype='gblrename' if meta else 'renameuser',
start=start,
end=end,
site=site,
)
options['group_changes'] = get_group_changes(site, start, end, meta)
gen = PreloadingGenerator(
config[key]['page'] for key in config if config[key]['enabled']
)
UserGroupsMassMessageListUpdater(generator=gen, site=site, **options).run()
if __name__ == '__main__':
main()
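# Illustrative only: main() accepts the command line arguments directly, so
# the bot can also be driven programmatically (the config page title below is
# a made-up example):
#
#     main('User:ExampleBot/MassMessage config.json',
#          '--meta', '--rename', '--always')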
|
|
# markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# markdown/serializers.py
#
# Add x/html serialization to ElementTree
# Taken from ElementTree 1.3 preview with slight modifications
#
# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import unicode_literals
from . import util
ElementTree = util.etree.ElementTree
QName = util.etree.QName
if hasattr(util.etree, 'test_comment'):
Comment = util.etree.test_comment
else:
Comment = util.etree.Comment
PI = util.etree.PI
ProcessingInstruction = util.etree.ProcessingInstruction
__all__ = ['to_html_string', 'to_xhtml_string']
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
              "img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # Dublin Core
"http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
        # shorter than 500 characters, or so. Assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _serialize_html(write, elem, qnames, namespaces, format):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None, format)
else:
write("<" + tag)
items = elem.items()
if items or namespaces:
items.sort() # lexical order
for k, v in items:
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v)
if qnames[k] == v and format == 'html':
# handle boolean attributes
write(" %s" % v)
else:
write(" %s=\"%s\"" % (qnames[k], v))
if namespaces:
items = namespaces.items()
items.sort(key=lambda x: x[1]) # sort on prefix
for v, k in items:
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (k, _escape_attrib(v)))
if format == "xhtml" and tag in HTML_EMPTY:
write(" />")
else:
write(">")
tag = tag.lower()
if text:
if tag == "script" or tag == "style":
write(text)
else:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None, format)
if tag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail))
def _write_html(root,
encoding=None,
default_namespace=None,
format="html"):
assert root is not None
data = []
write = data.append
qnames, namespaces = _namespaces(root, default_namespace)
_serialize_html(write, root, qnames, namespaces, format)
if encoding is None:
return "".join(data)
else:
        return _encode("".join(data), encoding)
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].split("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = "%s:%s" % (prefix, tag)
else:
qnames[qname] = tag # default element
else:
if default_namespace:
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = qname
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
try:
iterate = elem.iter
except AttributeError:
iterate = elem.getiterator # cET compatibility
for elem in iterate():
tag = elem.tag
if isinstance(tag, QName) and tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, util.string_type):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def to_html_string(element):
return _write_html(ElementTree(element).getroot(), format="html")
def to_xhtml_string(element):
return _write_html(ElementTree(element).getroot(), format="xhtml")
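# Illustrative only: how the two serializers above treat an empty element,
# assuming util.etree is the standard (c)ElementTree implementation.
if __name__ == '__main__':
    img = util.etree.Element('img', src='logo.png', alt='logo')
    print(to_html_string(img))   # <img alt="logo" src="logo.png">
    print(to_xhtml_string(img))  # <img alt="logo" src="logo.png" />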
|
|
import zipfile
from collections import defaultdict
from collections import namedtuple
from contextlib import closing
from cStringIO import StringIO
from itertools import imap
from time import gmtime
from urllib import urlencode
from botocore.exceptions import ClientError
from ModestMaps.Core import Coordinate
from msgpack import Unpacker
from raw_tiles.source.table_reader import TableReader
from raw_tiles.tile import Tile
from tilequeue.command import explode_and_intersect
from tilequeue.format import zip_format
from tilequeue.queue.message import MessageHandle
from tilequeue.tile import coord_marshall_int
from tilequeue.tile import coord_unmarshall_int
from tilequeue.tile import deserialize_coord
from tilequeue.toi import load_set_from_gzipped_fp
from tilequeue.utils import format_stacktrace_one_line
from tilequeue.utils import grouper
from tilequeue.utils import time_block
class SqsQueue(object):
def __init__(self, sqs_client, queue_url, recv_wait_time_seconds):
self.sqs_client = sqs_client
self.queue_url = queue_url
self.recv_wait_time_seconds = recv_wait_time_seconds
def send_without_retry(self, payloads):
"""
        Enqueue a sequence of payloads to the SQS queue.
        Each payload is already expected to be pre-formatted for the queue. At
        this time, it should be a comma-separated list of coordinate strings
that are grouped by their parent zoom.
This version does not retry, and returns any failed messages.
"""
msgs = []
for i, payload in enumerate(payloads):
msg_id = str(i)
msg = dict(
Id=msg_id,
MessageBody=payload,
)
msgs.append(msg)
resp = self.sqs_client.send_message_batch(
QueueUrl=self.queue_url,
Entries=msgs,
)
if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('Invalid status code from sqs: %s' %
resp['ResponseMetadata']['HTTPStatusCode'])
failed_messages = resp.get('Failed')
return failed_messages
def send(self, payloads, logger, num_tries=5):
"""
Enqueue payloads to the SQS queue, retrying failed messages with
exponential backoff.
"""
from time import sleep
backoff_interval = 1
backoff_factor = 2
for try_counter in xrange(0, num_tries):
failed_messages = self.send_without_retry(payloads)
# success!
if not failed_messages:
payloads = []
break
# output some information about the failures for debugging
# purposes. we expect failures to be quite rare, so we can be
# pretty verbose.
if logger:
for msg in failed_messages:
logger.warning('Failed to send message on try %d: Id=%r, '
'SenderFault=%r, Code=%r, Message=%r' %
(try_counter, msg['Id'],
msg.get('SenderFault'), msg.get('Code'),
msg.get('Message')))
# wait a little while, in case the problem is that we're talking
# too fast.
sleep(backoff_interval)
backoff_interval *= backoff_factor
# filter out the failed payloads for retry
retry_payloads = []
for msg in failed_messages:
i = int(msg['Id'])
retry_payloads.append(payloads[i])
payloads = retry_payloads
if payloads:
raise Exception('Messages failed to send to sqs after %d '
'retries: %s' % (num_tries, len(payloads)))
def read(self):
"""read a single message from the queue"""
resp = self.sqs_client.receive_message(
QueueUrl=self.queue_url,
MaxNumberOfMessages=1,
AttributeNames=('SentTimestamp',),
WaitTimeSeconds=self.recv_wait_time_seconds,
)
if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('Invalid status code from sqs: %s' %
resp['ResponseMetadata']['HTTPStatusCode'])
msgs = resp.get('Messages')
if not msgs:
return None
assert len(msgs) == 1
msg = msgs[0]
payload = msg['Body']
handle = msg['ReceiptHandle']
timestamp = msg['Attributes']['SentTimestamp']
metadata = dict(timestamp=timestamp)
msg_handle = MessageHandle(handle, payload, metadata)
return msg_handle
def done(self, msg_handle):
"""acknowledge completion of message"""
self.sqs_client.delete_message(
QueueUrl=self.queue_url,
ReceiptHandle=msg_handle.handle,
)
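# Illustrative only (not called anywhere in this module): one way to wire an
# SqsQueue up to a boto3 client. The queue URL and wait time are made-up
# values.
def _example_sqs_queue():
    import boto3
    queue = SqsQueue(
        sqs_client=boto3.client('sqs'),
        queue_url='https://sqs.us-east-1.amazonaws.com/123456789012/example',
        recv_wait_time_seconds=20,
    )
    msg_handle = queue.read()
    if msg_handle is not None:
        # ... hand msg_handle.payload to a marshaller, then acknowledge ...
        queue.done(msg_handle)
    return queue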
class RawrEnqueuer(object):
"""enqueue coords from expiry grouped by parent zoom"""
def __init__(
self, rawr_queue, toi_intersector, msg_marshaller, group_by_zoom,
logger, stats_handler):
self.rawr_queue = rawr_queue
self.toi_intersector = toi_intersector
self.msg_marshaller = msg_marshaller
self.group_by_zoom = group_by_zoom
self.logger = logger
self.stats_handler = stats_handler
def __call__(self, coords):
# this will produce the intersected list of coordinates with the toi,
# all the way to low zoom level tiles
intersected_coords, intersect_metrics, timing = \
self.toi_intersector(coords)
low_zoom_coord_ints = set()
grouped_by_zoom = defaultdict(list)
for coord in intersected_coords:
if self.group_by_zoom <= coord.zoom:
parent = coord.zoomTo(self.group_by_zoom).container()
parent_coord_int = coord_marshall_int(parent)
grouped_by_zoom[parent_coord_int].append(coord)
else:
coord_int = coord_marshall_int(coord)
low_zoom_coord_ints.add(coord_int)
n_coords = 0
payloads = []
for _, coords in grouped_by_zoom.iteritems():
payload = self.msg_marshaller.marshall(coords)
payloads.append(payload)
n_coords += len(coords)
# add all low zooms into a single payload
low_zoom_coords = []
for coord_int in low_zoom_coord_ints:
coord = coord_unmarshall_int(coord_int)
low_zoom_coords.append(coord)
if low_zoom_coords:
low_zoom_payload = self.msg_marshaller.marshall(low_zoom_coords)
payloads.append(low_zoom_payload)
n_payloads = len(payloads)
rawr_queue_batch_size = 10
n_msgs_sent = 0
for payloads_chunk in grouper(payloads, rawr_queue_batch_size):
self.rawr_queue.send(payloads_chunk, self.logger)
n_msgs_sent += 1
if self.logger:
self.logger.info(
'Rawr tiles enqueued: '
'coords(%d) payloads(%d) enqueue_calls(%d) '
'toi(%d) hits(%d) misses(%d)' %
(n_coords, n_payloads, n_msgs_sent,
intersect_metrics['n_toi'], intersect_metrics['hits'],
intersect_metrics['misses']))
self.stats_handler(n_coords, n_payloads, n_msgs_sent,
intersect_metrics, timing)
def common_parent(coords, parent_zoom):
"""
Return the common parent for coords
Also check that all coords do indeed share the same parent coordinate.
"""
parent = None
for coord in coords:
assert parent_zoom <= coord.zoom
coord_parent = coord.zoomTo(parent_zoom).container()
if parent is None:
parent = coord_parent
else:
assert parent == coord_parent
assert parent is not None, 'No coords?'
return parent
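# Illustrative only (not called anywhere): two zoom 12 coordinates that fall
# inside the same zoom 10 tile share the parent Coordinate(0, 0, 10).
def _example_common_parent():
    coords = [
        Coordinate(row=0, column=0, zoom=12),
        Coordinate(row=3, column=3, zoom=12),
    ]
    return common_parent(coords, parent_zoom=10)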
def convert_coord_object(coord):
"""Convert ModestMaps.Core.Coordinate -> raw_tiles.tile.Tile"""
assert isinstance(coord, Coordinate)
coord = coord.container()
return Tile(int(coord.zoom), int(coord.column), int(coord.row))
def unconvert_coord_object(tile):
"""Convert rawr_tiles.tile.Tile -> ModestMaps.Core.Coordinate"""
assert isinstance(tile, Tile)
return Coordinate(zoom=tile.z, column=tile.x, row=tile.y)
def convert_to_coord_ints(coords):
for coord in coords:
coord_int = coord_marshall_int(coord)
yield coord_int
class RawrToiIntersector(object):
"""
Explode and intersect coordinates with the toi
Prior to enqueueing the coordinates that have had their rawr tile
generated, the list should get intersected with the toi.
"""
def __init__(self, s3_client, bucket, key):
self.s3_client = s3_client
self.bucket = bucket
self.key = key
# state to avoid pulling down the whole list every time
self.prev_toi = None
self.etag = None
def tiles_of_interest(self):
"""conditionally get the toi from s3"""
# also return back whether the response was cached
# useful for metrics
is_cached = False
get_options = dict(
Bucket=self.bucket,
Key=self.key,
)
if self.etag:
get_options['IfNoneMatch'] = self.etag
try:
resp = self.s3_client.get_object(**get_options)
except Exception as e:
# boto3 client treats 304 responses as exceptions
if isinstance(e, ClientError):
resp = getattr(e, 'response', None)
assert resp
else:
raise e
status_code = resp['ResponseMetadata']['HTTPStatusCode']
if status_code == 304:
assert self.prev_toi
toi = self.prev_toi
is_cached = True
elif status_code == 200:
body = resp['Body']
try:
gzip_payload = body.read()
finally:
try:
body.close()
except Exception:
pass
gzip_file_obj = StringIO(gzip_payload)
toi = load_set_from_gzipped_fp(gzip_file_obj)
self.prev_toi = toi
self.etag = resp['ETag']
else:
assert 0, 'Unknown status code from toi get: %s' % status_code
return toi, is_cached
def __call__(self, coords):
timing = {}
with time_block(timing, 'total'):
with time_block(timing, 'fetch'):
toi, is_toi_cached = self.tiles_of_interest()
with time_block(timing, 'intersect'):
coord_ints = convert_to_coord_ints(coords)
intersected_coord_ints, intersect_metrics = \
explode_and_intersect(coord_ints, toi)
coords = map(coord_unmarshall_int, intersected_coord_ints)
intersect_metrics['cached'] = is_toi_cached
return coords, intersect_metrics, timing
class EmptyToiIntersector(object):
"""
A RawrToiIntersector which contains no tiles of interest.
Useful for testing and running locally.
"""
def tiles_of_interest(self):
return set([]), False
def __call__(self, coords):
metrics = dict(
total=len(coords),
hits=0,
misses=len(coords),
n_toi=0,
cached=False,
)
timing = dict(
fetch=0,
intersect=0,
)
return [], metrics, timing
class RawrAllIntersector(object):
"""
return back the coordinates themselves
This is useful when we know that we enqueue the full tile pyramids in the
message.
"""
def __call__(self, coords):
metrics = dict(
total=len(coords),
hits=len(coords),
misses=0,
n_toi=0,
cached=False,
)
timing = dict(
fetch=0,
intersect=0,
)
return coords, metrics, timing
class RawrAllWithParentsIntersector(object):
"""
return back the coordinates with their parents
"""
def __init__(self, zoom_stop_inclusive):
self.zoom_stop_inclusive = zoom_stop_inclusive
def __call__(self, coords):
timing = dict(
fetch=0,
intersect=0,
)
with time_block(timing, 'intersect'):
all_coord_ints = set()
for coord in coords:
while coord.zoom >= self.zoom_stop_inclusive:
coord_int = coord_marshall_int(coord)
if coord_int in all_coord_ints:
# as an optimization, assume that if the coord is
# already in the set, then all its parents will be too
break
all_coord_ints.add(coord_int)
coord = coord.zoomBy(-1).container()
coords = imap(coord_unmarshall_int, all_coord_ints)
metrics = dict(
total=len(all_coord_ints),
hits=len(all_coord_ints),
misses=0,
n_toi=0,
cached=False,
)
return coords, metrics, timing
class RawrTileGenerationPipeline(object):
"""Entry point for rawr process command"""
def __init__(
self, rawr_queue, msg_marshaller, group_by_zoom, rawr_gen,
queue_writer, stats_handler, rawr_proc_logger, conn_ctx):
self.rawr_queue = rawr_queue
self.msg_marshaller = msg_marshaller
self.group_by_zoom = group_by_zoom
self.rawr_gen = rawr_gen
self.queue_writer = queue_writer
self.stats_handler = stats_handler
self.rawr_proc_logger = rawr_proc_logger
self.conn_ctx = conn_ctx
def _atexit_log(self):
self.rawr_proc_logger.lifecycle('Processing stopped')
def __call__(self):
self.rawr_proc_logger.lifecycle('Processing started')
import atexit
atexit.register(self._atexit_log)
while True:
timing = {}
try:
# NOTE: it's ok if reading from the queue takes a long time
with time_block(timing, 'queue_read'):
msg_handle = self.rawr_queue.read()
except Exception as e:
self.log_exception(e, 'queue read')
continue
if not msg_handle:
# this gets triggered when no messages are returned
continue
try:
coords = self.msg_marshaller.unmarshall(msg_handle.payload)
except Exception as e:
self.log_exception(e, 'unmarshall payload')
continue
            # Split coordinates into those at the group-by zoom and higher
            # versus low-zoom coordinates. The message payload contains either
            # coordinates at the group-by zoom and higher, or only coordinates
            # below the group-by zoom, never a mix.
is_low_zoom = False
did_rawr_tile_gen = False
for coord in coords:
if coord.zoom < self.group_by_zoom:
is_low_zoom = True
else:
assert not is_low_zoom, \
'Mix of low/high zoom coords in payload'
# check if we need to generate the rawr tile
# proceed directly to enqueueing the coordinates if not
if not is_low_zoom:
did_rawr_tile_gen = True
try:
parent = common_parent(coords, self.group_by_zoom)
except Exception as e:
self.log_exception(e, 'find parent')
continue
try:
rawr_tile_coord = convert_coord_object(parent)
except Exception as e:
self.log_exception(e, 'convert coord', parent)
continue
try:
rawr_gen_timing = {}
with time_block(rawr_gen_timing, 'total'):
# grab connection
with self.conn_ctx() as conn:
# commit transaction
with conn as conn:
# cleanup cursor resources
with conn.cursor() as cur:
table_reader = TableReader(cur)
rawr_gen_specific_timing = self.rawr_gen(
table_reader, rawr_tile_coord)
rawr_gen_timing.update(rawr_gen_specific_timing)
timing['rawr_gen'] = rawr_gen_timing
except Exception as e:
self.log_exception(e, 'rawr tile gen', parent)
continue
try:
with time_block(timing, 'queue_write'):
n_enqueued, n_inflight = \
self.queue_writer.enqueue_batch(coords)
except Exception as e:
self.log_exception(e, 'queue write', parent)
continue
try:
with time_block(timing, 'queue_done'):
self.rawr_queue.done(msg_handle)
except Exception as e:
self.log_exception(e, 'queue done', parent)
continue
try:
self.rawr_proc_logger.processed(
n_enqueued, n_inflight, did_rawr_tile_gen, timing, parent)
except Exception as e:
self.log_exception(e, 'log', parent)
continue
try:
self.stats_handler(
n_enqueued, n_inflight, did_rawr_tile_gen, timing)
except Exception as e:
self.log_exception(e, 'stats', parent)
def log_exception(self, exception, msg, parent_coord=None):
stacktrace = format_stacktrace_one_line()
self.rawr_proc_logger.error(msg, exception, stacktrace, parent_coord)
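# One iteration of the RawrTileGenerationPipeline loop above, in outline:
#   1. read a message handle from the RAWR queue
#   2. unmarshall the payload into a list of coordinates
#   3. if the coordinates are at or above the group-by zoom, find their
#      common parent and generate the RAWR tile for it inside a database
#      transaction
#   4. enqueue the coordinates for downstream tile rendering
#   5. mark the message as done, then emit the per-message log and stats
# Any step that raises logs the exception and moves on to the next message.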
def make_rawr_zip_payload(rawr_tile, date_time=None):
"""make a zip file from the rawr tile formatted data"""
if date_time is None:
date_time = gmtime()[0:6]
buf = StringIO()
with zipfile.ZipFile(buf, mode='w') as z:
for fmt_data in rawr_tile.all_formatted_data:
zip_info = zipfile.ZipInfo(fmt_data.name, date_time)
z.writestr(zip_info, fmt_data.data, zipfile.ZIP_DEFLATED)
return buf.getvalue()
def unpack_rawr_zip_payload(table_sources, payload):
"""unpack a zipfile and turn it into a callable "tables" object."""
# the io we get from S3 is streaming, so we can't seek on it, but zipfile
# seems to require that. so we buffer it all in memory. RAWR tiles are
# generally up to around 100MB in size, which should be safe to store in
# RAM.
from tilequeue.query.common import Table
from io import BytesIO
zfh = zipfile.ZipFile(BytesIO(payload), 'r')
def get_table(table_name):
# need to extract the whole compressed file from zip reader, as it
# doesn't support .tell() on the filelike, which gzip requires.
data = zfh.open(table_name, 'r').read()
unpacker = Unpacker(file_like=BytesIO(data))
source = table_sources[table_name]
return Table(source, unpacker)
return get_table
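# A minimal sketch of the zip round-trip implemented by the two functions
# above. `FakeTile`/`FakeFmt` are illustrative stand-ins (assumptions, not
# tilequeue types) for an object exposing `all_formatted_data` entries with
# `name` and `data` attributes.
#
#   from collections import namedtuple
#   FakeFmt = namedtuple('FakeFmt', 'name data')
#   FakeTile = namedtuple('FakeTile', 'all_formatted_data')
#   payload = make_rawr_zip_payload(FakeTile([FakeFmt('roads', '\x90')]))
#   get_table = unpack_rawr_zip_payload({'roads': object()}, payload)
#   table = get_table('roads')  # Table(source, Unpacker) over the stored data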
def make_rawr_enqueuer(
rawr_queue, toi_intersector, msg_marshaller, group_by_zoom, logger,
stats_handler):
return RawrEnqueuer(
rawr_queue, toi_intersector, msg_marshaller, group_by_zoom, logger,
stats_handler)
class RawrS3Sink(object):
"""Rawr sink to write to s3"""
def __init__(self, s3_client, bucket, prefix, extension, tile_key_gen,
tags=None):
self.s3_client = s3_client
self.bucket = bucket
self.prefix = prefix
self.extension = extension
self.tile_key_gen = tile_key_gen
self.tags = tags
def __call__(self, rawr_tile):
payload = make_rawr_zip_payload(rawr_tile)
coord = unconvert_coord_object(rawr_tile.tile)
key = self.tile_key_gen(self.prefix, coord, self.extension)
put_opts = dict(
Body=payload,
Bucket=self.bucket,
ContentType='application/zip',
ContentLength=len(payload),
Key=key,
)
if self.tags:
put_opts['Tagging'] = urlencode(self.tags)
self.s3_client.put_object(**put_opts)
class RawrNullSink(object):
def __call__(self, rawr_tile):
pass
class RawrStoreSink(object):
"""Rawr sink to write to tilequeue store."""
def __init__(self, store):
self.store = store
def __call__(self, rawr_tile):
payload = make_rawr_zip_payload(rawr_tile)
coord = unconvert_coord_object(rawr_tile.tile)
format = zip_format
self.store.write_tile(payload, coord, format)
# implement the "get_table" interface, but always return an empty list. this
# allows us to fake an empty tile that might not be backed by any real data.
def _empty_table(table_name):
return []
class RawrS3Source(object):
"""Rawr source to read from S3."""
def __init__(self, s3_client, bucket, prefix, extension, table_sources,
tile_key_gen, allow_missing_tiles=False):
self.s3_client = s3_client
self.bucket = bucket
self.prefix = prefix
self.extension = extension
self.table_sources = table_sources
self.tile_key_gen = tile_key_gen
self.allow_missing_tiles = allow_missing_tiles
def _get_object(self, tile):
coord = unconvert_coord_object(tile)
key = self.tile_key_gen(self.prefix, coord, self.extension)
try:
response = self.s3_client.get_object(
Bucket=self.bucket,
Key=key,
)
        except Exception as e:
            # if we allow missing tiles, then translate a 404 exception into
            # a None response. this is useful for local or dev environments
            # where we might not have a global build, but don't want the lack
            # of RAWR tiles to kill jobs.
if self.allow_missing_tiles and isinstance(e, ClientError):
if e.response['ResponseMetadata']['HTTPStatusCode'] == 404:
return None
raise
return response
def __call__(self, tile):
        # _get_object raises an exception if the object is missing, unless
        # allow_missing_tiles is set, in which case a 404 is translated into
        # a None response.
response = self._get_object(tile)
if response is None:
return _empty_table
# check that the response isn't a delete marker.
assert 'DeleteMarker' not in response
with closing(response['Body']) as body_fp:
body = body_fp.read()
return unpack_rawr_zip_payload(self.table_sources, body)
class RawrStoreSource(object):
"""Rawr source to read from a tilequeue store."""
def __init__(self, store, table_sources):
self.store = store
self.table_sources = table_sources
def _get_object(self, tile):
coord = unconvert_coord_object(tile)
format = zip_format
payload = self.store.read_tile(coord, format)
return payload
def __call__(self, tile):
payload = self._get_object(tile)
return unpack_rawr_zip_payload(self.table_sources, payload)
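# Note: the sinks and sources above form symmetric pairs --
# RawrS3Sink/RawrS3Source and RawrStoreSink/RawrStoreSource -- so a tile
# written by a sink can be read back by the matching source. A rough sketch
# (store, table_sources and the 'roads' table name are assumptions):
#
#   RawrStoreSink(store)(rawr_tile)                    # writes the zip payload
#   tables = RawrStoreSource(store, table_sources)(rawr_tile.tile)
#   roads = tables('roads')                            # per-table reader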
def make_rawr_queue(name, region, wait_time_secs):
import boto3
sqs_client = boto3.client('sqs', region_name=region)
resp = sqs_client.get_queue_url(QueueName=name)
assert resp['ResponseMetadata']['HTTPStatusCode'] == 200, \
'Failed to get queue url for: %s' % name
queue_url = resp['QueueUrl']
from tilequeue.rawr import SqsQueue
rawr_queue = SqsQueue(sqs_client, queue_url, wait_time_secs)
return rawr_queue
class RawrFileQueue(object):
"""A source of RAWR tile jobs loaded from a text file."""
Handle = namedtuple('Handle', 'payload')
def __init__(self, filename, msg_marshaller):
self.queue = []
with open(filename, 'r') as fh:
for line in fh:
coord = deserialize_coord(line)
payload = msg_marshaller.marshall([coord])
self.queue.append(payload)
def read(self):
if len(self.queue) > 0:
payload = self.queue.pop()
return self.Handle(payload)
else:
            # nothing is left in the queue, and nothing more is going to be
            # added from the file (although it would be cool if it could
            # `tail` the file, that's something for a rainy day...), so
            # rather than block forever, we just exit.
            import sys
            sys.exit('RawrFileQueue is empty, all work finished!')
def done(self, handle):
pass
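# Note: the input file for RawrFileQueue is expected to contain one
# serialized coordinate per line (whatever deserialize_coord accepts, e.g. a
# "zoom/column/row" string), and read() hands entries back in LIFO order
# because it pops from the end of the list.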
def make_rawr_queue_from_yaml(rawr_queue_yaml, msg_marshaller):
rawr_queue_type = rawr_queue_yaml.get('type', 'sqs')
if rawr_queue_type == 'file':
input_file = rawr_queue_yaml.get('input-file')
assert input_file, 'Missing input-file for memory RAWR queue'
rawr_queue = RawrFileQueue(input_file, msg_marshaller)
else:
name = rawr_queue_yaml.get('name')
assert name, 'Missing rawr queue name'
region = rawr_queue_yaml.get('region')
assert region, 'Missing rawr queue region'
wait_time_secs = rawr_queue_yaml.get('wait-seconds')
assert wait_time_secs is not None, 'Missing rawr queue wait-seconds'
rawr_queue = make_rawr_queue(name, region, wait_time_secs)
return rawr_queue
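# A hedged example (keys taken from the code above, values are placeholders)
# of the two queue configurations accepted by make_rawr_queue_from_yaml:
#
#   {'type': 'file', 'input-file': 'coords.txt'}
#   {'type': 'sqs', 'name': 'rawr-jobs', 'region': 'us-east-1',
#    'wait-seconds': 20}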
def make_rawr_enqueuer_from_cfg(cfg, logger, stats_handler, msg_marshaller,
rawr_toi_intersector=None):
from tilequeue.rawr import make_rawr_enqueuer
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
group_by_zoom = rawr_yaml.get('group-zoom')
assert group_by_zoom is not None, 'Missing group-zoom rawr config'
rawr_queue_yaml = rawr_yaml.get('queue')
assert rawr_queue_yaml, 'Missing rawr queue config'
rawr_queue = make_rawr_queue_from_yaml(rawr_queue_yaml, msg_marshaller)
rawr_intersect_yaml = rawr_yaml.get('intersect')
assert rawr_intersect_yaml, 'Missing rawr intersect config'
intersect_type = rawr_intersect_yaml.get('type')
assert intersect_type, 'Missing rawr intersect type'
if rawr_toi_intersector is None:
if intersect_type == 'toi':
toi_yaml = cfg.yml.get('toi-store')
toi_type = toi_yaml.get('type')
assert toi_type == 's3', 'Rawr toi intersector requires toi on s3'
toi_s3_yaml = toi_yaml.get('s3')
assert toi_s3_yaml, 'Missing toi-store s3 config'
toi_bucket = toi_s3_yaml.get('bucket')
toi_key = toi_s3_yaml.get('key')
toi_region = toi_s3_yaml.get('region')
assert toi_bucket, 'Missing toi-store s3 bucket'
assert toi_key, 'Missing toi-store s3 key'
assert toi_region, 'Missing toi-store s3 region'
import boto3
s3_client = boto3.client('s3', region_name=toi_region)
from tilequeue.rawr import RawrToiIntersector
rawr_toi_intersector = RawrToiIntersector(
s3_client, toi_bucket, toi_key)
elif intersect_type == 'none':
from tilequeue.rawr import EmptyToiIntersector
rawr_toi_intersector = EmptyToiIntersector()
elif intersect_type == 'all':
from tilequeue.rawr import RawrAllIntersector
rawr_toi_intersector = RawrAllIntersector()
elif intersect_type == 'all-parents':
from tilequeue.rawr import RawrAllWithParentsIntersector
zoom_stop_inclusive = 0
rawr_toi_intersector = \
RawrAllWithParentsIntersector(zoom_stop_inclusive)
else:
assert 0, 'Invalid rawr intersect type: %s' % intersect_type
return make_rawr_enqueuer(
rawr_queue, rawr_toi_intersector, msg_marshaller, group_by_zoom,
logger, stats_handler)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .pool_usage_metrics import PoolUsageMetrics
from .image_reference import ImageReference
from .node_agent_sku import NodeAgentSku
from .authentication_token_settings import AuthenticationTokenSettings
from .usage_statistics import UsageStatistics
from .resource_statistics import ResourceStatistics
from .pool_statistics import PoolStatistics
from .job_statistics import JobStatistics
from .name_value_pair import NameValuePair
from .delete_certificate_error import DeleteCertificateError
from .certificate import Certificate
from .application_package_reference import ApplicationPackageReference
from .application_summary import ApplicationSummary
from .certificate_add_parameter import CertificateAddParameter
from .file_properties import FileProperties
from .node_file import NodeFile
from .schedule import Schedule
from .job_constraints import JobConstraints
from .resource_file import ResourceFile
from .environment_setting import EnvironmentSetting
from .exit_options import ExitOptions
from .exit_code_mapping import ExitCodeMapping
from .exit_code_range_mapping import ExitCodeRangeMapping
from .exit_conditions import ExitConditions
from .auto_user_specification import AutoUserSpecification
from .user_identity import UserIdentity
from .user_account import UserAccount
from .task_constraints import TaskConstraints
from .job_manager_task import JobManagerTask
from .job_preparation_task import JobPreparationTask
from .job_release_task import JobReleaseTask
from .task_scheduling_policy import TaskSchedulingPolicy
from .start_task import StartTask
from .certificate_reference import CertificateReference
from .metadata_item import MetadataItem
from .cloud_service_configuration import CloudServiceConfiguration
from .os_disk import OSDisk
from .windows_configuration import WindowsConfiguration
from .virtual_machine_configuration import VirtualMachineConfiguration
from .network_configuration import NetworkConfiguration
from .pool_specification import PoolSpecification
from .auto_pool_specification import AutoPoolSpecification
from .pool_information import PoolInformation
from .job_specification import JobSpecification
from .recent_job import RecentJob
from .job_schedule_execution_information import JobScheduleExecutionInformation
from .job_schedule_statistics import JobScheduleStatistics
from .cloud_job_schedule import CloudJobSchedule
from .job_schedule_add_parameter import JobScheduleAddParameter
from .job_scheduling_error import JobSchedulingError
from .job_execution_information import JobExecutionInformation
from .cloud_job import CloudJob
from .job_add_parameter import JobAddParameter
from .task_scheduling_error import TaskSchedulingError
from .job_preparation_task_execution_information import JobPreparationTaskExecutionInformation
from .job_release_task_execution_information import JobReleaseTaskExecutionInformation
from .job_preparation_and_release_task_execution_information import JobPreparationAndReleaseTaskExecutionInformation
from .auto_scale_run_error import AutoScaleRunError
from .auto_scale_run import AutoScaleRun
from .resize_error import ResizeError
from .cloud_pool import CloudPool
from .pool_add_parameter import PoolAddParameter
from .affinity_information import AffinityInformation
from .task_execution_information import TaskExecutionInformation
from .compute_node_information import ComputeNodeInformation
from .multi_instance_settings import MultiInstanceSettings
from .task_statistics import TaskStatistics
from .task_id_range import TaskIdRange
from .task_dependencies import TaskDependencies
from .cloud_task import CloudTask
from .task_add_parameter import TaskAddParameter
from .task_add_collection_parameter import TaskAddCollectionParameter
from .error_message import ErrorMessage
from .batch_error_detail import BatchErrorDetail
from .batch_error import BatchError, BatchErrorException
from .task_add_result import TaskAddResult
from .task_add_collection_result import TaskAddCollectionResult
from .subtask_information import SubtaskInformation
from .cloud_task_list_subtasks_result import CloudTaskListSubtasksResult
from .task_information import TaskInformation
from .start_task_information import StartTaskInformation
from .compute_node_error import ComputeNodeError
from .compute_node import ComputeNode
from .compute_node_user import ComputeNodeUser
from .compute_node_get_remote_login_settings_result import ComputeNodeGetRemoteLoginSettingsResult
from .job_schedule_patch_parameter import JobSchedulePatchParameter
from .job_schedule_update_parameter import JobScheduleUpdateParameter
from .job_disable_parameter import JobDisableParameter
from .job_terminate_parameter import JobTerminateParameter
from .job_patch_parameter import JobPatchParameter
from .job_update_parameter import JobUpdateParameter
from .pool_enable_auto_scale_parameter import PoolEnableAutoScaleParameter
from .pool_evaluate_auto_scale_parameter import PoolEvaluateAutoScaleParameter
from .pool_resize_parameter import PoolResizeParameter
from .pool_update_properties_parameter import PoolUpdatePropertiesParameter
from .pool_upgrade_os_parameter import PoolUpgradeOSParameter
from .pool_patch_parameter import PoolPatchParameter
from .task_update_parameter import TaskUpdateParameter
from .node_update_user_parameter import NodeUpdateUserParameter
from .node_reboot_parameter import NodeRebootParameter
from .node_reimage_parameter import NodeReimageParameter
from .node_disable_scheduling_parameter import NodeDisableSchedulingParameter
from .node_remove_parameter import NodeRemoveParameter
from .application_list_options import ApplicationListOptions
from .application_get_options import ApplicationGetOptions
from .pool_list_usage_metrics_options import PoolListUsageMetricsOptions
from .pool_get_all_lifetime_statistics_options import PoolGetAllLifetimeStatisticsOptions
from .pool_add_options import PoolAddOptions
from .pool_list_options import PoolListOptions
from .pool_delete_options import PoolDeleteOptions
from .pool_exists_options import PoolExistsOptions
from .pool_get_options import PoolGetOptions
from .pool_patch_options import PoolPatchOptions
from .pool_disable_auto_scale_options import PoolDisableAutoScaleOptions
from .pool_enable_auto_scale_options import PoolEnableAutoScaleOptions
from .pool_evaluate_auto_scale_options import PoolEvaluateAutoScaleOptions
from .pool_resize_options import PoolResizeOptions
from .pool_stop_resize_options import PoolStopResizeOptions
from .pool_update_properties_options import PoolUpdatePropertiesOptions
from .pool_upgrade_os_options import PoolUpgradeOsOptions
from .pool_remove_nodes_options import PoolRemoveNodesOptions
from .account_list_node_agent_skus_options import AccountListNodeAgentSkusOptions
from .job_get_all_lifetime_statistics_options import JobGetAllLifetimeStatisticsOptions
from .job_delete_options import JobDeleteOptions
from .job_get_options import JobGetOptions
from .job_patch_options import JobPatchOptions
from .job_update_options import JobUpdateOptions
from .job_disable_options import JobDisableOptions
from .job_enable_options import JobEnableOptions
from .job_terminate_options import JobTerminateOptions
from .job_add_options import JobAddOptions
from .job_list_options import JobListOptions
from .job_list_from_job_schedule_options import JobListFromJobScheduleOptions
from .job_list_preparation_and_release_task_status_options import JobListPreparationAndReleaseTaskStatusOptions
from .certificate_add_options import CertificateAddOptions
from .certificate_list_options import CertificateListOptions
from .certificate_cancel_deletion_options import CertificateCancelDeletionOptions
from .certificate_delete_options import CertificateDeleteOptions
from .certificate_get_options import CertificateGetOptions
from .file_delete_from_task_options import FileDeleteFromTaskOptions
from .file_get_from_task_options import FileGetFromTaskOptions
from .file_get_properties_from_task_options import FileGetPropertiesFromTaskOptions
from .file_delete_from_compute_node_options import FileDeleteFromComputeNodeOptions
from .file_get_from_compute_node_options import FileGetFromComputeNodeOptions
from .file_get_properties_from_compute_node_options import FileGetPropertiesFromComputeNodeOptions
from .file_list_from_task_options import FileListFromTaskOptions
from .file_list_from_compute_node_options import FileListFromComputeNodeOptions
from .job_schedule_exists_options import JobScheduleExistsOptions
from .job_schedule_delete_options import JobScheduleDeleteOptions
from .job_schedule_get_options import JobScheduleGetOptions
from .job_schedule_patch_options import JobSchedulePatchOptions
from .job_schedule_update_options import JobScheduleUpdateOptions
from .job_schedule_disable_options import JobScheduleDisableOptions
from .job_schedule_enable_options import JobScheduleEnableOptions
from .job_schedule_terminate_options import JobScheduleTerminateOptions
from .job_schedule_add_options import JobScheduleAddOptions
from .job_schedule_list_options import JobScheduleListOptions
from .task_add_options import TaskAddOptions
from .task_list_options import TaskListOptions
from .task_add_collection_options import TaskAddCollectionOptions
from .task_delete_options import TaskDeleteOptions
from .task_get_options import TaskGetOptions
from .task_update_options import TaskUpdateOptions
from .task_list_subtasks_options import TaskListSubtasksOptions
from .task_terminate_options import TaskTerminateOptions
from .task_reactivate_options import TaskReactivateOptions
from .compute_node_add_user_options import ComputeNodeAddUserOptions
from .compute_node_delete_user_options import ComputeNodeDeleteUserOptions
from .compute_node_update_user_options import ComputeNodeUpdateUserOptions
from .compute_node_get_options import ComputeNodeGetOptions
from .compute_node_reboot_options import ComputeNodeRebootOptions
from .compute_node_reimage_options import ComputeNodeReimageOptions
from .compute_node_disable_scheduling_options import ComputeNodeDisableSchedulingOptions
from .compute_node_enable_scheduling_options import ComputeNodeEnableSchedulingOptions
from .compute_node_get_remote_login_settings_options import ComputeNodeGetRemoteLoginSettingsOptions
from .compute_node_get_remote_desktop_options import ComputeNodeGetRemoteDesktopOptions
from .compute_node_list_options import ComputeNodeListOptions
from .application_summary_paged import ApplicationSummaryPaged
from .pool_usage_metrics_paged import PoolUsageMetricsPaged
from .cloud_pool_paged import CloudPoolPaged
from .node_agent_sku_paged import NodeAgentSkuPaged
from .cloud_job_paged import CloudJobPaged
from .job_preparation_and_release_task_execution_information_paged import JobPreparationAndReleaseTaskExecutionInformationPaged
from .certificate_paged import CertificatePaged
from .node_file_paged import NodeFilePaged
from .cloud_job_schedule_paged import CloudJobSchedulePaged
from .cloud_task_paged import CloudTaskPaged
from .compute_node_paged import ComputeNodePaged
from .batch_service_client_enums import (
OSType,
AccessScope,
CertificateState,
CertificateFormat,
JobAction,
DependencyAction,
AutoUserScope,
ElevationLevel,
ComputeNodeFillType,
CertificateStoreLocation,
CertificateVisibility,
CachingType,
PoolLifetimeOption,
OnAllTasksComplete,
OnTaskFailure,
JobScheduleState,
SchedulingErrorCategory,
JobState,
JobPreparationTaskState,
JobReleaseTaskState,
PoolState,
AllocationState,
TaskState,
TaskAddStatus,
SubtaskState,
StartTaskState,
ComputeNodeState,
SchedulingState,
DisableJobOption,
ComputeNodeDeallocationOption,
ComputeNodeRebootOption,
ComputeNodeReimageOption,
DisableComputeNodeSchedulingOption,
)
__all__ = [
'PoolUsageMetrics',
'ImageReference',
'NodeAgentSku',
'AuthenticationTokenSettings',
'UsageStatistics',
'ResourceStatistics',
'PoolStatistics',
'JobStatistics',
'NameValuePair',
'DeleteCertificateError',
'Certificate',
'ApplicationPackageReference',
'ApplicationSummary',
'CertificateAddParameter',
'FileProperties',
'NodeFile',
'Schedule',
'JobConstraints',
'ResourceFile',
'EnvironmentSetting',
'ExitOptions',
'ExitCodeMapping',
'ExitCodeRangeMapping',
'ExitConditions',
'AutoUserSpecification',
'UserIdentity',
'UserAccount',
'TaskConstraints',
'JobManagerTask',
'JobPreparationTask',
'JobReleaseTask',
'TaskSchedulingPolicy',
'StartTask',
'CertificateReference',
'MetadataItem',
'CloudServiceConfiguration',
'OSDisk',
'WindowsConfiguration',
'VirtualMachineConfiguration',
'NetworkConfiguration',
'PoolSpecification',
'AutoPoolSpecification',
'PoolInformation',
'JobSpecification',
'RecentJob',
'JobScheduleExecutionInformation',
'JobScheduleStatistics',
'CloudJobSchedule',
'JobScheduleAddParameter',
'JobSchedulingError',
'JobExecutionInformation',
'CloudJob',
'JobAddParameter',
'TaskSchedulingError',
'JobPreparationTaskExecutionInformation',
'JobReleaseTaskExecutionInformation',
'JobPreparationAndReleaseTaskExecutionInformation',
'AutoScaleRunError',
'AutoScaleRun',
'ResizeError',
'CloudPool',
'PoolAddParameter',
'AffinityInformation',
'TaskExecutionInformation',
'ComputeNodeInformation',
'MultiInstanceSettings',
'TaskStatistics',
'TaskIdRange',
'TaskDependencies',
'CloudTask',
'TaskAddParameter',
'TaskAddCollectionParameter',
'ErrorMessage',
'BatchErrorDetail',
'BatchError', 'BatchErrorException',
'TaskAddResult',
'TaskAddCollectionResult',
'SubtaskInformation',
'CloudTaskListSubtasksResult',
'TaskInformation',
'StartTaskInformation',
'ComputeNodeError',
'ComputeNode',
'ComputeNodeUser',
'ComputeNodeGetRemoteLoginSettingsResult',
'JobSchedulePatchParameter',
'JobScheduleUpdateParameter',
'JobDisableParameter',
'JobTerminateParameter',
'JobPatchParameter',
'JobUpdateParameter',
'PoolEnableAutoScaleParameter',
'PoolEvaluateAutoScaleParameter',
'PoolResizeParameter',
'PoolUpdatePropertiesParameter',
'PoolUpgradeOSParameter',
'PoolPatchParameter',
'TaskUpdateParameter',
'NodeUpdateUserParameter',
'NodeRebootParameter',
'NodeReimageParameter',
'NodeDisableSchedulingParameter',
'NodeRemoveParameter',
'ApplicationListOptions',
'ApplicationGetOptions',
'PoolListUsageMetricsOptions',
'PoolGetAllLifetimeStatisticsOptions',
'PoolAddOptions',
'PoolListOptions',
'PoolDeleteOptions',
'PoolExistsOptions',
'PoolGetOptions',
'PoolPatchOptions',
'PoolDisableAutoScaleOptions',
'PoolEnableAutoScaleOptions',
'PoolEvaluateAutoScaleOptions',
'PoolResizeOptions',
'PoolStopResizeOptions',
'PoolUpdatePropertiesOptions',
'PoolUpgradeOsOptions',
'PoolRemoveNodesOptions',
'AccountListNodeAgentSkusOptions',
'JobGetAllLifetimeStatisticsOptions',
'JobDeleteOptions',
'JobGetOptions',
'JobPatchOptions',
'JobUpdateOptions',
'JobDisableOptions',
'JobEnableOptions',
'JobTerminateOptions',
'JobAddOptions',
'JobListOptions',
'JobListFromJobScheduleOptions',
'JobListPreparationAndReleaseTaskStatusOptions',
'CertificateAddOptions',
'CertificateListOptions',
'CertificateCancelDeletionOptions',
'CertificateDeleteOptions',
'CertificateGetOptions',
'FileDeleteFromTaskOptions',
'FileGetFromTaskOptions',
'FileGetPropertiesFromTaskOptions',
'FileDeleteFromComputeNodeOptions',
'FileGetFromComputeNodeOptions',
'FileGetPropertiesFromComputeNodeOptions',
'FileListFromTaskOptions',
'FileListFromComputeNodeOptions',
'JobScheduleExistsOptions',
'JobScheduleDeleteOptions',
'JobScheduleGetOptions',
'JobSchedulePatchOptions',
'JobScheduleUpdateOptions',
'JobScheduleDisableOptions',
'JobScheduleEnableOptions',
'JobScheduleTerminateOptions',
'JobScheduleAddOptions',
'JobScheduleListOptions',
'TaskAddOptions',
'TaskListOptions',
'TaskAddCollectionOptions',
'TaskDeleteOptions',
'TaskGetOptions',
'TaskUpdateOptions',
'TaskListSubtasksOptions',
'TaskTerminateOptions',
'TaskReactivateOptions',
'ComputeNodeAddUserOptions',
'ComputeNodeDeleteUserOptions',
'ComputeNodeUpdateUserOptions',
'ComputeNodeGetOptions',
'ComputeNodeRebootOptions',
'ComputeNodeReimageOptions',
'ComputeNodeDisableSchedulingOptions',
'ComputeNodeEnableSchedulingOptions',
'ComputeNodeGetRemoteLoginSettingsOptions',
'ComputeNodeGetRemoteDesktopOptions',
'ComputeNodeListOptions',
'ApplicationSummaryPaged',
'PoolUsageMetricsPaged',
'CloudPoolPaged',
'NodeAgentSkuPaged',
'CloudJobPaged',
'JobPreparationAndReleaseTaskExecutionInformationPaged',
'CertificatePaged',
'NodeFilePaged',
'CloudJobSchedulePaged',
'CloudTaskPaged',
'ComputeNodePaged',
'OSType',
'AccessScope',
'CertificateState',
'CertificateFormat',
'JobAction',
'DependencyAction',
'AutoUserScope',
'ElevationLevel',
'ComputeNodeFillType',
'CertificateStoreLocation',
'CertificateVisibility',
'CachingType',
'PoolLifetimeOption',
'OnAllTasksComplete',
'OnTaskFailure',
'JobScheduleState',
'SchedulingErrorCategory',
'JobState',
'JobPreparationTaskState',
'JobReleaseTaskState',
'PoolState',
'AllocationState',
'TaskState',
'TaskAddStatus',
'SubtaskState',
'StartTaskState',
'ComputeNodeState',
'SchedulingState',
'DisableJobOption',
'ComputeNodeDeallocationOption',
'ComputeNodeRebootOption',
'ComputeNodeReimageOption',
'DisableComputeNodeSchedulingOption',
]
|
|
"""
Implementation of Deep Residual Network.
References:
[1] "Deep Residual Learning for Image Recognition" https://arxiv.org/pdf/1512.03385.pdf
[2] "Identity Mappings in Deep Residual Networks" https://arxiv.org/pdf/1603.05027.pdf
"""
from keras import backend as K
from keras.layers import Activation, Dense, Flatten, Input
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import AveragePooling2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import add
from keras.models import Model
from keras.utils import plot_model
class ResidualUnit:
"""
Residual unit as described in [1].
"""
def __init__(self, filters, first_conv_strides):
self.filters = filters
self.first_conv_strides = first_conv_strides
def __call__(self, x):
conv1 = Conv2D(filters=self.filters, kernel_size=(3, 3),
strides=self.first_conv_strides, padding='same',
kernel_initializer='glorot_normal')(x)
norm1 = BatchNormalization(axis=3)(conv1)
relu1 = Activation('relu')(norm1)
conv2 = Conv2D(filters=self.filters, kernel_size=(3, 3),
strides=(1, 1), padding='same',
kernel_initializer='glorot_normal')(relu1)
norm2 = BatchNormalization(axis=3)(conv2)
return Activation('relu')(self.shortcut_and_add(x, norm2))
def shortcut_and_add(self, x, residual):
x_shape = K.int_shape(x)
residual_shape = K.int_shape(residual)
shortcut = x
if x_shape != residual_shape:
conv1 = Conv2D(filters=residual_shape[3], kernel_size=(1, 1),
strides=self.first_conv_strides, padding='same',
kernel_initializer='glorot_normal')(x)
shortcut = BatchNormalization(axis=3)(conv1)
return add([shortcut, residual])
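# Note on shortcut_and_add above: when the input and residual tensors differ
# in shape -- e.g. a stride-2 unit taking (56, 56, 64) activations to
# (28, 28, 128) -- the shortcut is projected with a strided 1x1 convolution
# plus batch normalization so the two tensors can be added, matching the
# projection shortcut (option B) of [1]. The example shapes are illustrative.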
class BottleneckResidualUnit(ResidualUnit):
"""
Bottleneck residual unit as described in [1] for ResNet-50/101/152.
"""
def __call__(self, x):
conv1 = Conv2D(filters=self.filters, kernel_size=(1, 1),
strides=self.first_conv_strides, padding='same',
kernel_initializer='glorot_normal')(x)
norm1 = BatchNormalization(axis=3)(conv1)
relu1 = Activation('relu')(norm1)
conv2 = Conv2D(filters=self.filters, kernel_size=(3, 3),
strides=(1, 1), padding='same',
kernel_initializer='glorot_normal')(relu1)
norm2 = BatchNormalization(axis=3)(conv2)
relu2 = Activation('relu')(norm2)
        conv3 = Conv2D(filters=self.filters * 4, kernel_size=(1, 1),
strides=(1, 1), padding='same',
kernel_initializer='glorot_normal')(relu2)
norm3 = BatchNormalization(axis=3)(conv3)
return Activation('relu')(self.shortcut_and_add(x, norm3))
class IdentityResidualUnit(ResidualUnit):
"""
Basic residual unit as described in [2].
"""
def __call__(self, x):
norm1 = BatchNormalization(axis=3)(x)
relu1 = Activation('relu')(norm1)
conv1 = Conv2D(filters=self.filters, kernel_size=(3, 3),
strides=self.first_conv_strides, padding='same',
kernel_initializer='glorot_normal')(relu1)
norm2 = BatchNormalization(axis=3)(conv1)
relu2 = Activation('relu')(norm2)
conv2 = Conv2D(filters=self.filters, kernel_size=(3, 3),
strides=(1, 1), padding='same',
kernel_initializer='glorot_normal')(relu2)
return self.shortcut_and_add(x, conv2)
class BottleneckIdentityResidualUnit(ResidualUnit):
"""
    Bottleneck residual unit with pre-activation as described in [2].
"""
def __call__(self, x):
norm1 = BatchNormalization(axis=3)(x)
relu1 = Activation('relu')(norm1)
        conv1 = Conv2D(filters=self.filters, kernel_size=(1, 1),
strides=self.first_conv_strides, padding='same',
kernel_initializer='glorot_normal')(relu1)
norm2 = BatchNormalization(axis=3)(conv1)
relu2 = Activation('relu')(norm2)
conv2 = Conv2D(filters=self.filters, kernel_size=(3, 3),
strides=(1, 1), padding='same',
kernel_initializer='glorot_normal')(relu2)
norm3 = BatchNormalization(axis=3)(conv2)
relu3 = Activation('relu')(norm3)
conv3 = Conv2D(filters=self.filters * 4, kernel_size=(1, 1),
strides=(1, 1), padding='same',
kernel_initializer='glorot_normal')(relu3)
return self.shortcut_and_add(x, conv3)
class ResidualBlock:
def __init__(self, units, filters, residual_unit_cls, is_first_block=False):
self.filters = filters
self.units = units
self.is_first_block = is_first_block
self.residual_unit_cls = residual_unit_cls
def __call__(self, x):
current = x
for i in range(self.units):
strides = (1, 1)
if not self.is_first_block and i == 0:
strides = (2, 2)
current = self.residual_unit_cls(
filters=self.filters, first_conv_strides=strides)(current)
return current
class WideResidualUnit:
# TODO
pass
def get(input_shape, num_classes, residual_unit_cls, units_per_block):
"""As described in [1]"""
x = Input(shape=input_shape)
conv1 = Conv2D(filters=64, kernel_size=(7, 7),
strides=(2, 2), padding='same',
kernel_initializer='glorot_normal')(x)
norm1 = BatchNormalization(axis=3)(conv1)
relu1 = Activation('relu')(norm1)
current = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(relu1)
filters = 64
for i, units in enumerate(units_per_block):
current = ResidualBlock(units, filters, residual_unit_cls, is_first_block=(i == 0))(current)
filters *= 2
relu1 = Activation('relu')(current)
avg_pool = AveragePooling2D(pool_size=(7, 7), strides=(1, 1))(relu1)
flatten1 = Flatten()(avg_pool)
dense = Dense(units=num_classes, activation='softmax')(flatten1)
return Model(inputs=x, outputs=dense)
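# Note: with the stem (stride-2 7x7 conv plus stride-2 max pool) and the
# three stride-2 residual blocks above, a 224x224 input -- the size used in
# [1] -- reaches the final stage at 7x7 spatial resolution, which is why the
# average pooling uses pool_size=(7, 7). Other input sizes would need a
# different pool size.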
def get_18(input_shape, num_classes, unit_cls=ResidualUnit):
"""As described in [1]"""
_validate_non_bottleneck_unit(unit_cls)
return get(input_shape, num_classes, unit_cls, [2, 2, 2, 2])
def get_34(input_shape, num_classes, unit_cls=ResidualUnit):
"""As described in [1]"""
_validate_non_bottleneck_unit(unit_cls)
return get(input_shape, num_classes, unit_cls, [3, 4, 6, 3])
def get_50(input_shape, num_classes, unit_cls=BottleneckResidualUnit):
"""As described in [1]"""
_validate_bottleneck_unit(unit_cls)
return get(input_shape, num_classes, unit_cls, [3, 4, 6, 3])
def get_101(input_shape, num_classes, unit_cls=BottleneckResidualUnit):
"""As described in [1]"""
_validate_bottleneck_unit(unit_cls)
return get(input_shape, num_classes, unit_cls, [3, 4, 23, 3])
def get_152(input_shape, num_classes, unit_cls=BottleneckResidualUnit):
"""As described in [1]"""
_validate_bottleneck_unit(unit_cls)
return get(input_shape, num_classes, unit_cls, [3, 8, 36, 3])
def _validate_non_bottleneck_unit(unit_cls):
if unit_cls not in (ResidualUnit, IdentityResidualUnit):
raise ValueError('Invalid non bottleneck unit')
def _validate_bottleneck_unit(unit_cls):
if unit_cls not in (BottleneckResidualUnit, BottleneckIdentityResidualUnit):
raise ValueError('Invalid bottleneck unit')
if __name__ == '__main__':
resnet34 = get_34((224, 224, 3), 1000)
    plot_model(resnet34, to_file='resnet_34.png')
resnet101 = get_101((224, 224, 3), 1000)
plot_model(resnet101, to_file='resnet_101.png')
|
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import time
import uuid
import functools
import pytest
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError
from azure.mixedreality.remoterendering.aio import RemoteRenderingClient
from azure.mixedreality.remoterendering import (AssetConversionInputSettings,
AssetConversionOutputSettings,
AssetConversionStatus,
RenderingSession,
RenderingSessionSize,
RenderingSessionStatus)
from devtools_testutils import AzureTestCase, PowerShellPreparer
from preparers import RemoteRenderingPreparer
from preparers import RemoteRendererClientPreparer as ClientPreparer
RemoteRendererClientPreparer = functools.partial(ClientPreparer, RemoteRenderingClient)
def create_remote_rendering_client(remoterendering_arr_service_endpoint,
remoterendering_arr_account_id,
remoterendering_arr_account_domain,
remoterendering_arr_account_key):
key_credential = AzureKeyCredential(remoterendering_arr_account_key)
client = RemoteRenderingClient(
endpoint=remoterendering_arr_service_endpoint,
account_id=remoterendering_arr_account_id,
account_domain=remoterendering_arr_account_domain,
credential=key_credential)
return client
class ClientTests(AzureTestCase):
def get_var(self, variable_name, default_or_playback_value):
if self.is_live:
return os.environ.get(variable_name, default_or_playback_value)
return default_or_playback_value
@RemoteRenderingPreparer()
def test_create_client(self,
remoterendering_arr_service_endpoint,
remoterendering_arr_account_id,
remoterendering_arr_account_domain,
remoterendering_arr_account_key):
client = create_remote_rendering_client(remoterendering_arr_service_endpoint,
remoterendering_arr_account_id,
remoterendering_arr_account_domain,
remoterendering_arr_account_key)
assert client is not None
@RemoteRenderingPreparer()
def test_create_client_with_invalid_arguments(self,
remoterendering_arr_service_endpoint,
remoterendering_arr_account_id,
remoterendering_arr_account_domain,
remoterendering_arr_account_key):
key_credential = AzureKeyCredential(remoterendering_arr_account_key)
with pytest.raises(ValueError):
RemoteRenderingClient(
endpoint=None,
account_id=remoterendering_arr_account_id,
account_domain=remoterendering_arr_account_domain,
credential=key_credential)
with pytest.raises(ValueError):
RemoteRenderingClient(
endpoint=remoterendering_arr_service_endpoint,
account_id=None,
account_domain=remoterendering_arr_account_domain,
credential=key_credential)
with pytest.raises(ValueError):
RemoteRenderingClient(
endpoint=remoterendering_arr_service_endpoint,
account_id=remoterendering_arr_account_id,
account_domain=None,
credential=key_credential)
with pytest.raises(ValueError):
RemoteRenderingClient(
endpoint=remoterendering_arr_service_endpoint,
account_id=remoterendering_arr_account_id,
account_domain=remoterendering_arr_account_domain,
credential=None)
with pytest.raises(ValueError):
RemoteRenderingClient(
endpoint=remoterendering_arr_service_endpoint,
account_id=remoterendering_arr_account_id,
account_domain=remoterendering_arr_account_domain,
credential=key_credential,
authentication_endpoint_url="#")
@RemoteRenderingPreparer()
@RemoteRendererClientPreparer()
async def test_simple_conversion(self,
client,
remoterendering_arr_storage_account_name,
remoterendering_storage_endpoint_suffix,
remoterendering_arr_blob_container_name,
remoterendering_arr_sas_token
):
if self.is_live:
conversion_id = str(uuid.uuid4())
else:
conversion_id = "1724f808-17c6-4058-93c8-f39c2a84b0b7"
storage_container_uri = "https://"+remoterendering_arr_storage_account_name + \
".blob."+remoterendering_storage_endpoint_suffix+"/"+remoterendering_arr_blob_container_name
input_settings = AssetConversionInputSettings(
storage_container_uri=storage_container_uri,
relative_input_asset_path="testBox.fbx",
blob_prefix="Input",
storage_container_read_list_sas="?"+remoterendering_arr_sas_token
)
output_settings = AssetConversionOutputSettings(
storage_container_uri=storage_container_uri,
blob_prefix=conversion_id,
storage_container_write_sas="?"+remoterendering_arr_sas_token
)
conversion_poller = await client.begin_asset_conversion(
conversion_id=conversion_id, input_settings=input_settings, output_settings=output_settings
)
conversion = await client.get_asset_conversion(conversion_id)
assert conversion.id == conversion_id
assert conversion.settings.input_settings.relative_input_asset_path == input_settings.relative_input_asset_path
assert conversion.status != AssetConversionStatus.FAILED
finished_conversion = await conversion_poller.result()
assert finished_conversion.id == conversion_id
assert finished_conversion.settings.input_settings.relative_input_asset_path == input_settings.relative_input_asset_path
assert finished_conversion.status == AssetConversionStatus.SUCCEEDED
        assert finished_conversion.output.asset_uri.endswith(conversion_id+"/testBox.arrAsset")
foundConversion = False
conversions = await client.list_asset_conversions()
async for c in conversions:
            if c.id == conversion_id:
foundConversion = True
break
assert foundConversion == True
@RemoteRenderingPreparer()
async def test_failed_conversion_unauthorized(self,
remoterendering_arr_service_endpoint,
remoterendering_arr_account_id,
remoterendering_arr_account_domain,
remoterendering_arr_account_key,
remoterendering_arr_storage_account_name,
remoterendering_storage_endpoint_suffix,
remoterendering_arr_blob_container_name,
remoterendering_arr_sas_token
):
client = create_remote_rendering_client(remoterendering_arr_service_endpoint,
remoterendering_arr_account_id,
remoterendering_arr_account_domain,
"thisisnotthekey")
if self.is_live:
conversion_id = str(uuid.uuid4())
else:
conversion_id = "1724f808-17c6-4058-93c8-f39c2a84b0b7"
storage_container_uri = "https://"+remoterendering_arr_storage_account_name + \
".blob."+remoterendering_storage_endpoint_suffix+"/"+remoterendering_arr_blob_container_name
input_settings = AssetConversionInputSettings(
storage_container_uri=storage_container_uri,
relative_input_asset_path="testBox.fbx",
blob_prefix="Input"
# Do not provide SAS access to the container, and assume the test account is not linked to the storage.
)
output_settings = AssetConversionOutputSettings(
storage_container_uri=storage_container_uri,
blob_prefix=conversion_id
# Do not provide SAS access to the container, and assume the test account is not linked to the storage.
)
with pytest.raises(HttpResponseError) as excinfo:
# make the request which cannot access the storage account
conversion_poller = await client.begin_asset_conversion(
conversion_id=conversion_id, input_settings=input_settings, output_settings=output_settings
)
exception = excinfo.value
assert exception.status_code == 401
assert "Unauthorized" in exception.message
@RemoteRenderingPreparer()
@RemoteRendererClientPreparer()
async def test_failed_conversion_no_access(self,
client,
remoterendering_arr_storage_account_name,
remoterendering_storage_endpoint_suffix,
remoterendering_arr_blob_container_name,
remoterendering_arr_sas_token
):
if self.is_live:
conversion_id = str(uuid.uuid4())
else:
conversion_id = "b994f753-8835-426f-9b04-af990407acca"
storage_container_uri = "https://"+remoterendering_arr_storage_account_name + \
".blob."+remoterendering_storage_endpoint_suffix+"/"+remoterendering_arr_blob_container_name
input_settings = AssetConversionInputSettings(
storage_container_uri=storage_container_uri,
relative_input_asset_path="testBox.fbx",
blob_prefix="Input"
# Do not provide SAS access to the container, and assume the test account is not linked to the storage.
)
output_settings = AssetConversionOutputSettings(
storage_container_uri=storage_container_uri,
blob_prefix=conversion_id
# Do not provide SAS access to the container, and assume the test account is not linked to the storage.
)
with pytest.raises(HttpResponseError) as excinfo:
# make the request which cannot access the storage account
conversion_poller = await client.begin_asset_conversion(
conversion_id=conversion_id, input_settings=input_settings, output_settings=output_settings
)
assert excinfo.value.status_code == 403
error_details = excinfo.value
assert "storage" in error_details.message
assert "permissions" in error_details.message
@RemoteRenderingPreparer()
@RemoteRendererClientPreparer()
async def test_failed_conversion_missing_asset(self,
client,
remoterendering_arr_storage_account_name,
remoterendering_storage_endpoint_suffix,
remoterendering_arr_blob_container_name,
remoterendering_arr_sas_token
):
if self.is_live:
conversion_id = str(uuid.uuid4())
else:
conversion_id = "3ff6ab5c-600a-4892-bae9-348f215b1fa4"
storage_container_uri = "https://"+remoterendering_arr_storage_account_name + \
".blob."+remoterendering_storage_endpoint_suffix+"/"+remoterendering_arr_blob_container_name
input_settings = AssetConversionInputSettings(
storage_container_uri=storage_container_uri,
relative_input_asset_path="testBoxWhichDoesNotExist.fbx",
blob_prefix="Input",
storage_container_read_list_sas="?"+remoterendering_arr_sas_token
)
output_settings = AssetConversionOutputSettings(
storage_container_uri=storage_container_uri,
blob_prefix=conversion_id,
storage_container_write_sas="?"+remoterendering_arr_sas_token
)
with pytest.raises(HttpResponseError) as excinfo:
conversion_poller = await client.begin_asset_conversion(
conversion_id=conversion_id, input_settings=input_settings, output_settings=output_settings
)
await conversion_poller.result()
error_details = excinfo.value
assert "invalid input" in error_details.error.message.lower()
assert "logs" in error_details.error.message.lower()
@RemoteRenderingPreparer()
@RemoteRendererClientPreparer()
async def test_simple_session(self,
client
):
if self.is_live:
session_id = str(uuid.uuid4())
else:
session_id = "f3fd6db9-86c6-4bee-b652-fb1fc0dde08e"
session_poller = await client.begin_rendering_session(
session_id=session_id, size=RenderingSessionSize.STANDARD, lease_time_minutes=15)
session = await client.get_rendering_session(session_id)
assert session.id == session_id
assert session.size == RenderingSessionSize.STANDARD
assert session.lease_time_minutes == 15
assert session.status != RenderingSessionStatus.ERROR
ready_session = await session_poller.result()
assert ready_session.id == session_id
assert ready_session.size == RenderingSessionSize.STANDARD
assert ready_session.lease_time_minutes == 15
assert ready_session.status == RenderingSessionStatus.READY
assert ready_session.hostname
assert ready_session.arr_inspector_port is not None
assert ready_session.handshake_port is not None
extended_session = await client.update_rendering_session(session_id=session_id, lease_time_minutes=20)
assert extended_session.id == session_id
assert extended_session.size == RenderingSessionSize.STANDARD
assert extended_session.lease_time_minutes == 15 or extended_session.lease_time_minutes == 20
assert extended_session.status == RenderingSessionStatus.READY
foundSession = False
async for s in await client.list_rendering_sessions():
if s.id == session_id:
foundSession = True
break
assert foundSession == True
await client.stop_rendering_session(session_id)
stopped_session = await client.get_rendering_session(session_id)
assert stopped_session.status == RenderingSessionStatus.STOPPED
@RemoteRenderingPreparer()
@RemoteRendererClientPreparer()
async def test_failed_session_request(self,
client
):
if self.is_live:
session_id = str(uuid.uuid4())
else:
session_id = "dbab9c99-6971-4fbd-84c3-b00445ec3c04"
with pytest.raises(HttpResponseError) as excinfo:
# Make an invalid request (negative lease time).
session_poller = await client.begin_rendering_session(
session_id=session_id, size=RenderingSessionSize.STANDARD, lease_time_minutes=-4)
assert excinfo.value.status_code == 400
exception = excinfo.value
assert "lease" in exception.message.lower()
assert "negative" in exception.message.lower()
|
|
import time
import hmac
import hashlib
import base64
import urllib
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import task
from flask_restful import fields, marshal
import requests
from floranet.appserver.azure_iot import AzureIot
from floranet.models.application import Application
from floranet.models.appproperty import AppProperty
from floranet.models.device import Device
from floranet.log import log
class AzureIotHttps(AzureIot):
"""LoRa application server interface to Microsoft Azure IoT platform,
using HTTPS protocol.
For the HTTPS protocol, Azure IOT requires us to poll the IoT Hub
for cloud-to-device messages. Under current guidelines, each
device should poll for messages every 25 minutes or more. The interval
property is set as poll_interval (in minutes).
Attributes:
netserver (Netserver): The network server object
appinterface (AppInterface): The related AppInterface
iothost (str): Azure IOT host name
keyname (str): Azure IOT key name
keyvalue (str): Azure IOT key value
poll_interval (int): Polling interval, in minutes
started (bool): State flag
polling (bool): Polling task flag
"""
TABLENAME = 'appif_azure_iot_https'
HASMANY = [{'name': 'appinterfaces', 'class_name': 'AppInterface', 'as': 'interfaces'}]
API_VERSION = '2016-02-03'
TOKEN_VALID_SECS = 300
TIMEOUT = 10.0
def afterInit(self):
self.netserver = None
self.appinterface = None
self.started = False
self.polling = False
@inlineCallbacks
def valid(self):
"""Validate an AzureIotHttps object.
Returns:
valid (bool), message(dict): (True, empty) on success,
(False, error message dict) otherwise.
"""
messages = {}
# Check polling interval
if self.poll_interval < 25:
messages['poll_interval'] = "Polling interval must be at least " \
"25 minutes."
valid = not any(messages)
returnValue((valid, messages))
yield
def marshal(self):
"""Get REST API marshalled fields as an orderedDict
Returns:
OrderedDict of fields defined by marshal_fields
"""
marshal_fields = {
'type': fields.String(attribute='__class__.__name__'),
'id': fields.Integer(attribute='appinterface.id'),
'name': fields.String,
'iothost': fields.String,
'keyname': fields.String,
'keyvalue': fields.String,
'poll_interval': fields.Integer,
'started': fields.Boolean,
}
return marshal(self, marshal_fields)
@inlineCallbacks
def start(self, netserver):
"""Start the application interface
Args:
netserver (NetServer): The LoRa network server
Returns True on success, False otherwise
"""
self.netserver = netserver
self.polling = False
if not hasattr(self, 'task'):
self.task = task.LoopingCall(self._pollInboundMessages)
# Setup the looping task to query for messages
self.task.start(self.poll_interval * 60)
# Set the running flag
self.started = True
returnValue(True)
yield
def active(self):
"""Return active status"""
return self.started
def stop(self):
"""Stop the application interface"""
# Stop the looping task
self.polling = False
self.task.stop()
self.started = False
@inlineCallbacks
def netServerReceived(self, device, app, port, appdata):
"""Receive application data from the network server
We issue a POST request to the Azure IOT hub host with appdata
as the data parameter.
Args:
device (Device): LoRa device object
app (Application): device's application
port (int): fport of the frame payload
appdata (str): Application data
"""
if not self.started:
returnValue(None)
        # Map the device name to the Azure IOT deviceId
devid = device.appname if device.appname else device.name
prop = yield AppProperty.find(where=['application_id = ? and port = ?',
app.id, port], limit=1)
# If the property is not found, send the data as is.
if prop is None:
data = appdata
else:
# Create the Azure message.
data = self._azureMessage(devid, prop, appdata)
if data is None:
log.debug("Application interface {name} could not create "
"message for property {prop}", name=self.name, prop=prop.name)
returnValue(None)
# Form the URL, headers and parameters
url = 'https://{}/devices/{}/messages/events'.format(
self.iothost.lower(), devid.lower())
resuri = '{}/devices/{}'.format(self.iothost, devid)
headers = {'Authorization': self._iotHubSasToken(resuri)}
params = {'api-version': self.API_VERSION}
# Issue the POST request
try:
r = requests.post(url, headers=headers,
params=params, data=data, timeout=self.TIMEOUT)
except requests.exceptions.RequestException:
log.debug("Application interface {name} could not send to "
"Azure IOT Hub {host} for device ID {device}",
name=self.name, host=self.iothost, device=devid)
@inlineCallbacks
def _pollInboundMessages(self):
"""Poll Azure IOT hub for inbound messages and forward
them to the Network Server"""
        # If a polling pass is already in progress, return
if self.polling is True:
returnValue(None)
log.info("Azure IoT HTTPS interface '{name}' commencing "
"polling loop", name=self.name)
self.polling = True
# Get the applications associated with this interface.
apps = yield Application.find(where=['appinterface_id = ?', self.appinterface.id])
if apps is None:
self.polling = False
returnValue(None)
# Loop through the applications
for app in apps:
# Poll all devices associated with this app
devices = yield Device.find(where=['appeui = ?', app.appeui])
if devices is None:
returnValue(None)
for device in devices:
# Use the device appname property for the Azure devid,
# if it exists. Otherwise, use the device name property
devid = device.appname if device.appname else device.name
# Form the url, headers and parameters
url = 'https://{}/devices/{}/messages/devicebound'.format(
self.iothost, devid)
resuri = '{}/devices/{}'.format(self.iothost, devid)
headers = {'Authorization': self._iotHubSasToken(resuri)}
params = {'api-version': self.API_VERSION}
# Make the request, catch any exceptions
try:
r = requests.get(url, headers=headers,
params=params, timeout=self.TIMEOUT)
except requests.exceptions.RequestException:
log.debug("Application interface {name} could not poll "
"Azure IOT Hub {host} for device ID {device}",
name=self.name, host=self.iothost, device=devid)
continue
# Response code 204 indicates there is no data to be sent.
if r.status_code == 204:
continue
# Response code 200 means we have data to send to the device
elif r.status_code == 200:
appdata = r.content
self.netserver.inboundAppMessage(device.devaddr, appdata)
self.polling = False
def datagramReceived(self, data, (host, port)):
"""Receive inbound application server data"""
pass
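# Illustrative sketch only: _iotHubSasToken is implemented in the AzureIot
# base class and is not shown here. An Azure IoT Hub SAS token conventionally
# has the form below; this example relies on the module-level imports above
# and may differ in detail from floranet's actual helper.
def _example_iothub_sas_token(resource_uri, key_name, key_value,
                              valid_secs=300):
    # expiry as a unix timestamp, valid_secs seconds from now
    expiry = str(int(time.time() + valid_secs))
    # string to sign: URL-encoded resource URI, newline, expiry
    to_sign = '{}\n{}'.format(urllib.quote_plus(resource_uri), expiry)
    # HMAC-SHA256 over the string to sign, keyed with the base64-decoded key
    signature = base64.b64encode(
        hmac.new(base64.b64decode(key_value), to_sign,
                 hashlib.sha256).digest())
    return 'SharedAccessSignature sr={}&sig={}&se={}&skn={}'.format(
        urllib.quote_plus(resource_uri), urllib.quote_plus(signature),
        expiry, key_name)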
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-09-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-09-01"
self.config = config
def create_or_update(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a virtual network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to create or update virtual
network gateway operation.
:type parameters: :class:`VirtualNetworkGateway
<azure.mgmt.network.models.VirtualNetworkGateway>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`VirtualNetworkGateway
<azure.mgmt.network.models.VirtualNetworkGateway>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
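    # Usage note (illustrative, not part of the generated code): the
    # AzureOperationPoller returned above is typically consumed as, e.g.
    #
    #   poller = network_client.virtual_network_gateways.create_or_update(
    #       'my-resource-group', 'my-gateway', parameters)
    #   gateway = poller.result()  # blocks until provisioning completes
    #
    # where network_client is an assumed NetworkManagementClient instance.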
def get(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network gateway by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :rtype: :class:`VirtualNetworkGateway
         <azure.mgmt.network.models.VirtualNetworkGateway>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [204, 202, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual network gateways by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VirtualNetworkGatewayPaged
<azure.mgmt.network.models.VirtualNetworkGatewayPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def reset(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, **operation_config):
"""Resets the primary of the virtual network gateway in the specified
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param gateway_vip: Virtual network gateway vip address supplied to
the begin reset of the active-active feature enabled gateway.
:type gateway_vip: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`VirtualNetworkGateway
         <azure.mgmt.network.models.VirtualNetworkGateway>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if gateway_vip is not None:
query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def generatevpnclientpackage(
self, resource_group_name, virtual_network_gateway_name, processor_architecture, custom_headers=None, raw=False, **operation_config):
"""Generates VPN client package for P2S client of the virtual network
gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param processor_architecture: VPN client Processor Architecture.
Possible values are: 'AMD64' and 'X86'. Possible values include:
'Amd64', 'X86'
:type processor_architecture: str or :class:`ProcessorArchitecture
<azure.mgmt.network.models.ProcessorArchitecture>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :rtype: str or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.VpnClientParameters(processor_architecture=processor_architecture)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnClientParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
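# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the generated module. It assumes an
# older msrest-based azure-mgmt-network release in which the operations above
# are exposed as client.virtual_network_gateways; the resource group and
# gateway names below are placeholders and credentials must be supplied by
# the caller.
def _example_virtual_network_gateway_usage(credentials, subscription_id):
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(credentials, subscription_id)
    # get() returns the deserialized VirtualNetworkGateway directly.
    gateway = client.virtual_network_gateways.get('example-rg', 'example-gw')
    # reset() returns an AzureOperationPoller; result() blocks until the
    # long-running operation finishes and yields the final gateway model.
    poller = client.virtual_network_gateways.reset('example-rg', 'example-gw')
    return gateway, poller.result()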
|
|
from UnitCommands import *
from Landing import *
import math
VALID_ROE = 100
def GetMessageParam(BB, messageName):
msg = BB.ReadMessage(messageName)
if (msg == ''):
return 0
else:
return float(msg)
def RadarOn(TI):
UI = TI.GetPlatformInterface()
BB = TI.GetBlackboardInterface()
n_sensors = UI.GetSensorCount()
for n in range(0, n_sensors):
sens_info = UI.GetSensorInfo(n)
# 1 radar, 2 ESM, 4 passive sonar, 8 active sonar, 16 optical, 0 error
if (sens_info.type == 1):
UI.SetSensorState(n, 1)
TI.EndTask()
# Intermittent radar: each aircraft turns its radar on with a probability
# chosen so that, across the group, a radar is radiating roughly 25% of the
# time. The aircraft are not synchronized with each other, so this is only an
# approximation.
def RadarRandom(TI):
UI = TI.GetPlatformInterface()
TI.SetUpdateInterval(90.0)
nGroup = 1
mission = UI.GetMissionInfo()
if (mission.IsValid()):
nGroup = mission.GetMissionAircraftCount()
p_on = 0.25 / float(nGroup)
state = UI.Rand() < p_on
n_sensors = UI.GetSensorCount()
for n in range(0, n_sensors):
sens_info = UI.GetSensorInfo(n)
# 1 radar, 2 ESM, 4 passive sonar, 8 active sonar, 16 optical, 0 error
if (sens_info.type == 1):
UI.SetSensorState(n, state)
def RadarOff(TI):
UI = TI.GetPlatformInterface()
BB = TI.GetBlackboardInterface()
n_sensors = UI.GetSensorCount()
for n in range(0, n_sensors):
sens_info = UI.GetSensorInfo(n)
# 1 radar, 2 ESM, 4 passive sonar, 8 active sonar, 16 optical, 0 error
if (sens_info.type == 1):
UI.SetSensorState(n, 0)
TI.EndTask()
def ActiveSonarOn(TI):
UI = TI.GetPlatformInterface()
BB = TI.GetBlackboardInterface()
n_sensors = UI.GetSensorCount()
for n in range(0, n_sensors):
sens_info = UI.GetSensorInfo(n)
# 1 radar, 2 ESM, 4 passive sonar, 8 active sonar, 16 optical, 0 error
if (sens_info.type == 8):
UI.SetSensorState(n, 1)
TI.EndTask()
def ActiveSonarOff(TI):
UI = TI.GetPlatformInterface()
BB = TI.GetBlackboardInterface()
n_sensors = UI.GetSensorCount()
for n in range(0, n_sensors):
sens_info = UI.GetSensorInfo(n)
# 1 radar, 2 ESM, 4 passive sonar, 8 active sonar, 16 optical, 0 error
if (sens_info.type == 8):
UI.SetSensorState(n, 0)
TI.EndTask()
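# Illustrative helper, a sketch rather than part of the original script: the
# sensor on/off tasks above differ only in which sensor type they toggle and
# to what state, so the shared loop can be factored out. It uses only calls
# already present in this file.
def SetSensorsOfType(UI, sensor_type, state):
    # sensor type codes: 1 radar, 2 ESM, 4 passive sonar, 8 active sonar, 16 optical
    n_sensors = UI.GetSensorCount()
    for n in range(0, n_sensors):
        sens_info = UI.GetSensorInfo(n)
        if (sens_info.type == sensor_type):
            UI.SetSensorState(n, state)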
def WaitForGroup(TI):
BB = TI.GetBlackboardInterface()
if (not GetConnControl(BB)):
        TI.EndTask() # can't get controls, so just end
return
TI.SetUpdateInterval(30.0)
UI = TI.GetPlatformInterface()
alt_m = UI.GetAltitude()
cruise_speed = UI.GetCruiseSpeedForAltitude(alt_m)
UI.SetSpeed(0.8*cruise_speed)
UI.SetHeading(UI.GetHeading() + 90)
iteration = TI.GetMemoryValue(1)
iteration = iteration + 1
TI.SetMemoryValue(1, iteration)
mission = UI.GetMissionInfo()
if (mission.IsValid()):
lagging_waypoint = mission.GetSmallestWaypointIndex()
my_waypoint = UI.GetCurrentWaypoint()
        if ((lagging_waypoint == my_waypoint) or (iteration > 100)): # don't wait more than about 50 min
UI.SetSpeed(cruise_speed)
TI.EndTask()
else:
TI.EndTask()
# randomly move throughout area
def AirPatrolArea(TI):
BB = TI.GetBlackboardInterface()
if (not GetConnControl(BB)):
#UI.DisplayMessage('High priority task has control')
        TI.EndTask() # can't get controls, so just end
return
UI = TI.GetPlatformInterface()
if (not UI.IsAir()):
#UI.DisplayMessage('Not an aircraft')
TI.EndTask()
return
state = TI.GetMemoryValue(2) # 0 - uninit, 1 - moving to new waypoint
if (state == 0):
TI.SetMemoryValue(2, 1)
patrol_point = UI.GetRandomPatrolPoint()
if ((patrol_point.lon == 0) and (patrol_point.lat == 0)):
UI.DisplayMessage('No patrol area')
            TI.EndTask() # probably don't have a patrol area defined
return
TI.SetMemoryValue(11, patrol_point.lon)
TI.SetMemoryValue(12, patrol_point.lat)
lon_rad = TI.GetMemoryValue(11)
lat_rad = TI.GetMemoryValue(12)
range_km = UI.GetRangeToDatum(lon_rad, lat_rad)
if (range_km < 2): # set new patrol point
patrol_point = UI.GetRandomPatrolPoint()
TI.SetMemoryValue(11, patrol_point.lon)
TI.SetMemoryValue(12, patrol_point.lat)
lon_rad = patrol_point.lon
lat_rad = patrol_point.lat
elif (range_km < 25):
TI.SetUpdateInterval(15.0)
else:
TI.SetUpdateInterval(45.0)
hdg_deg = UI.GetHeadingToDatum(lon_rad, lat_rad)
UI.SetHeading(hdg_deg)
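# Task memory slot conventions used by the patrol tasks in this file (a
# summary of what the code already does, added for clarity):
#   1      - iteration counter (WaitForGroup)
#   2      - task state machine value
#   11, 12 - goal longitude / latitude in radians
#   20     - time of the last bingo-fuel check (BarrierPatrol)
#   50     - cached HasSonar(UI) result (ASWPatrolArea)
#   61, 62 - longitude / latitude of the last sonobuoy drop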
# For ASW helicopter, randomly move throughout area, stopping to drop sonobuoys and use dipping sonar
def ASWPatrolArea(TI):
BB = TI.GetBlackboardInterface()
if (not GetConnControl(BB)):
        TI.EndTask() # can't get controls, so just end
return
UI = TI.GetPlatformInterface()
state = TI.GetMemoryValue(2) # 0 - uninit, 1 - moving to new waypoint, 2 - coming to hover, 3 - drop buoy, 4 - attack target, listen
if (state == 0):
if (not UI.IsHelo()):
TI.EndTask()
return
UI.SetAllSensorState(1)
TI.SetMemoryValue(2, 1)
patrol_point = UI.GetRandomPatrolPoint()
if ((patrol_point.lon == 0) and (patrol_point.lat == 0)):
            TI.EndTask() # probably don't have a patrol area defined
return
TI.SetMemoryValue(11, patrol_point.lon)
TI.SetMemoryValue(12, patrol_point.lat)
TI.SetMemoryValue(50, HasSonar(UI))
state = 1
    # Bingo-fuel check (cancel the task when low on fuel): commented out 18DEC2010; the RTB task should handle this instead
# t = UI.GetTime()
# last_bingo_check = TI.GetMemoryValue(20)
# if ((t - last_bingo_check) > 60):
# TI.SetMemoryValue(20, t)
# if (CheckBingo(UI, BB)):
# cruise_alt = UI.GetCruiseAltitude()
# UI.SetAltitude(cruise_alt)
# UI.SetSpeed(UI.GetCruiseSpeedForAltitude(cruise_alt))
# ReleaseConnControl(BB)
# TI.EndTask()
# return
has_sonar = TI.GetMemoryValue(50)
my_track = UI.GetTrackById(UI.GetPlatformId())
buoy_spacing_km = 6.0
if (state == 1):
# check for a sub target to attack
best_target, best_launcher = GetSubTarget(UI)
if (best_target != -1):
tgt_track = UI.GetTargetTrackInfo()
TI.SetMemoryValue(11, tgt_track.Lon)
TI.SetMemoryValue(12, tgt_track.Lat)
#UI.DisplayMessage('sub %d, %f,%f' % (tgt_track.ID, tgt_track.Lon, tgt_track.Lat))
lon_rad = TI.GetMemoryValue(11)
lat_rad = TI.GetMemoryValue(12)
range_km = UI.GetRangeToDatum(lon_rad, lat_rad)
if (range_km < 0.3): # reached waypoint
# pick next search waypoint
patrol_point = UI.GetRandomPatrolPoint()
TI.SetMemoryValue(11, patrol_point.lon)
TI.SetMemoryValue(12, patrol_point.lat)
TI.SetMemoryValue(2, 2)
elif (range_km < 3):
TI.SetUpdateInterval(15.0)
UI.SetSpeed(0.5*UI.GetMaxSpeed())
else:
TI.SetUpdateInterval(45.0)
UI.SetSpeed(0.8*UI.GetMaxSpeed())
hdg_deg = UI.GetHeadingToDatum(lon_rad, lat_rad)
UI.SetHeading(hdg_deg)
UI.SetAltitude(500)
# check how far we are from last buoy drop and drop another if far enough
last_lon_rad = TI.GetMemoryValue(61)
last_lat_rad = TI.GetMemoryValue(62)
if ((last_lon_rad != 0) and (UI.GetRangeToDatum(last_lon_rad, last_lat_rad) >= 5.0)):
TI.SetMemoryValue(2, 5)
return
if (state == 2):
TI.SetUpdateInterval(30.0)
if (has_sonar):
goal_alt_m = 50
goal_speed_kts = 3
else:
goal_alt_m = 300
goal_speed_kts = 40
UI.SetAltitude(goal_alt_m)
UI.SetSpeed(goal_speed_kts)
if ((UI.GetAltitude() > (goal_alt_m+1)) or (UI.GetSpeed() > goal_speed_kts)):
return
TI.SetMemoryValue(2, 3)
state = 3
if (state == 3):
nLaunchers = UI.GetLauncherCount()
for n in range(0, nLaunchers):
launcher_info = UI.GetLauncherInfo(n)
if (launcher_info.LaunchMode == 11): # sonobuoy
if (not IsSonobuoyWithin(UI, buoy_spacing_km)):
lon_rad = TI.GetMemoryValue(11)
lat_rad = TI.GetMemoryValue(12)
UI.SendDatumToLauncher(lon_rad, lat_rad, 0, n)
UI.Launch(n, 1)
# update last drop point
TI.SetMemoryValue(61, my_track.Lon)
TI.SetMemoryValue(62, my_track.Lat)
break
n_sensors = UI.GetSensorCount()
for n in range(0, n_sensors):
sens_info = UI.GetSensorInfo(n)
# 1 radar, 2 ESM, 4 passive sonar, 8 active sonar, 16 optical, 0 error
if ((sens_info.type == 4) or (sens_info.type == 8)):
UI.SetSensorState(n, 1)
TI.SetMemoryValue(2, 4)
TI.SetUpdateInterval(10.0)
return
if (state == 4):
# check for a sub target to attack
target_id, launcher_idx = GetSubTarget(UI)
if ((target_id != -1) and (launcher_idx != -1)):
UI.SetTarget(target_id) # double check target is set
range_km = UI.GetRangeToTarget()
if (range_km < 4):
target_info = UI.GetTargetTrackInfo()
UI.SendDatumToLauncher(target_info.Lon, target_info.Lat, target_info.Alt, launcher_idx)
UI.Launch(launcher_idx, 1)
else:
TI.SetUpdateInterval(10.0) # reposition to get closer to target
else:
if (has_sonar):
TI.SetUpdateInterval(240.0) # hover for a while if helo has sonar
else:
TI.SetUpdateInterval(10.0)
TI.SetMemoryValue(2, 1)
if (state == 5): # drop a buoy while flying leg to next point
# drop buoy
nLaunchers = UI.GetLauncherCount()
for n in range(0, nLaunchers):
launcher_info = UI.GetLauncherInfo(n)
if (launcher_info.LaunchMode == 11): # sonobuoy
if (not IsSonobuoyWithin(UI, buoy_spacing_km)):
lon_rad = TI.GetMemoryValue(11)
lat_rad = TI.GetMemoryValue(12)
UI.SendDatumToLauncher(lon_rad, lat_rad, 0, n)
UI.Launch(n, 1)
break
# update last drop point
TI.SetMemoryValue(61, my_track.Lon)
TI.SetMemoryValue(62, my_track.Lat)
# return to state 1
TI.SetUpdateInterval(10.0)
TI.SetMemoryValue(2, 1)
# For fixed-wing aircraft, randomly move throughout area, dropping sonobuoys
def ASWPatrolAreaFW(TI):
BB = TI.GetBlackboardInterface()
if (not GetConnControl(BB)):
        TI.EndTask() # can't get controls, so just end
return
UI = TI.GetPlatformInterface()
state = TI.GetMemoryValue(2) # 0 - uninit, 1 - moving to new waypoint, 2 - coming to hover, 3 - drop buoy, 4 - attack target, listen
if (state == 0):
if (UI.IsHelo()):
TI.EndTask() # fixed-wing only for this task
return
UI.SetAllSensorState(1)
TI.SetMemoryValue(2, 1)
patrol_point = UI.GetRandomPatrolPoint()
if ((patrol_point.lon == 0) and (patrol_point.lat == 0)):
            TI.EndTask() # probably don't have a patrol area defined
return
TI.SetMemoryValue(11, patrol_point.lon)
TI.SetMemoryValue(12, patrol_point.lat)
state = 1
buoy_spacing_km = 5.0
my_track = UI.GetTrackById(UI.GetPlatformId())
if (state == 1):
cruise_kts = UI.GetCruiseSpeedForAltitude(UI.GetAltitude())
UI.SetSpeed(cruise_kts)
UI.SetAltitude(2000)
# check for a sub target to attack
best_target, best_launcher = GetSubTarget(UI)
if (best_target != -1):
tgt_track = UI.GetTargetTrackInfo()
TI.SetMemoryValue(11, tgt_track.Lon)
TI.SetMemoryValue(12, tgt_track.Lat)
#UI.DisplayMessage('sub %d, %f,%f' % (tgt_track.ID, tgt_track.Lon, tgt_track.Lat))
lon_rad = TI.GetMemoryValue(11)
lat_rad = TI.GetMemoryValue(12)
range_km = UI.GetRangeToDatum(lon_rad, lat_rad)
if (range_km < 1.0): # reached waypoint
# pick next search waypoint
patrol_point = UI.GetRandomPatrolPoint()
TI.SetMemoryValue(11, patrol_point.lon)
TI.SetMemoryValue(12, patrol_point.lat)
TI.SetMemoryValue(2, 3) # drop a buoy at waypoint
elif (range_km < 5.0):
TI.SetUpdateInterval(15.0)
else:
TI.SetUpdateInterval(45.0)
hdg_deg = UI.GetHeadingToDatum(lon_rad, lat_rad)
UI.SetHeading(hdg_deg)
# check how far we are from last buoy drop and drop another if far enough
last_lon_rad = TI.GetMemoryValue(61)
last_lat_rad = TI.GetMemoryValue(62)
if ((last_lon_rad != 0) and (UI.GetRangeToDatum(last_lon_rad, last_lat_rad) >= 5.0)):
TI.SetMemoryValue(2, 5) # drop a buoy
return
if (state == 3):
nLaunchers = UI.GetLauncherCount()
for n in range(0, nLaunchers):
launcher_info = UI.GetLauncherInfo(n)
if (launcher_info.LaunchMode == 11): # sonobuoy
if (not IsSonobuoyWithin(UI, buoy_spacing_km)):
lon_rad = TI.GetMemoryValue(11)
lat_rad = TI.GetMemoryValue(12)
UI.SendDatumToLauncher(lon_rad, lat_rad, 0, n)
UI.Launch(n, 1)
# update last drop point
TI.SetMemoryValue(61, my_track.Lon)
TI.SetMemoryValue(62, my_track.Lat)
break
TI.SetMemoryValue(2, 4) # look for sub target
TI.SetUpdateInterval(10.0)
return
if (state == 4):
# check for a sub target to attack
target_id, launcher_idx = GetSubTarget(UI)
if ((target_id != -1) and (launcher_idx != -1)):
UI.SetTarget(target_id) # double check target is set
range_km = UI.GetRangeToTarget()
if (range_km < 4):
target_info = UI.GetTargetTrackInfo()
UI.SendDatumToLauncher(target_info.Lon, target_info.Lat, target_info.Alt, launcher_idx)
UI.Launch(launcher_idx, 1)
else:
TI.SetUpdateInterval(10.0) # reposition to get closer to target
else:
TI.SetUpdateInterval(10.0)
TI.SetMemoryValue(2, 1) # back to search
if (state == 5): # drop a buoy while flying leg to next point
# drop buoy
nLaunchers = UI.GetLauncherCount()
for n in range(0, nLaunchers):
launcher_info = UI.GetLauncherInfo(n)
if (launcher_info.LaunchMode == 11): # sonobuoy
if (not IsSonobuoyWithin(UI, buoy_spacing_km)):
lon_rad = TI.GetMemoryValue(11)
lat_rad = TI.GetMemoryValue(12)
UI.SendDatumToLauncher(lon_rad, lat_rad, 0, n)
UI.Launch(n, 1)
break
# update last drop point
TI.SetMemoryValue(61, my_track.Lon)
TI.SetMemoryValue(62, my_track.Lat)
# return to state 1
TI.SetUpdateInterval(10.0)
TI.SetMemoryValue(2, 1)
# move between current waypoint and previous
def BarrierPatrol(TI):
BB = TI.GetBlackboardInterface()
UI = TI.GetPlatformInterface()
if (not UI.IsAir()):
TI.EndTask()
return
state = TI.GetMemoryValue(2) # 0 - uninit, 1 - initialized
if (state == 0):
UI.SetAllSensorState(1)
TI.SetMemoryValue(2, 1)
current_waypoint = UI.GetCurrentWaypoint()
TI.SetMemoryValue(11, current_waypoint)
# check for bingo fuel and cancel task if so
t = UI.GetTime()
last_bingo_check = TI.GetMemoryValue(20)
if ((t - last_bingo_check) > 60):
TI.SetMemoryValue(20, t)
if (CheckBingo(UI, BB)):
cruise_alt = UI.GetCruiseAltitude()
UI.SetAltitude(cruise_alt)
UI.SetSpeed(UI.GetCruiseSpeedForAltitude(cruise_alt))
ReleaseConnControl(BB)
TI.EndTask()
return
start_waypoint = TI.GetMemoryValue(11)
current_waypoint = UI.GetCurrentWaypoint()
if (current_waypoint > start_waypoint+1):
UI.SetCurrentWaypoint(int(start_waypoint))
# Look for nearby sub target to engage
def GetSubTarget(UI):
# anAffiliation: UNKNOWN = 0, FRIENDLY = 1, NEUTRAL = 2, HOSTILE = 3
#
# anClassMask:
# PTYPE_SURFACE 0x0010
# PTYPE_AIR 0x0020
# PTYPE_MISSILE 0x0040
# PTYPE_SUBSURFACE 0x0080
# PTYPE_SUBMARINE 0x0081
# PTYPE_FIXED 0x0100
# int anClassMask, float afMaxRange_km, UINT8 anAffiliation
track_list = UI.GetTrackList(0x0081, 100, VALID_ROE)
current_time = UI.GetTime()
nTracks = track_list.Size()
best_range = 1e6
best_target = -1
best_launcher = -1
best_engaged_count = 99
for n in range(0, nTracks):
track_info = track_list.GetTrack(n)
track_id = track_info.ID
is_destroyed = track_info.IsDestroyed()
track_error_km = track_info.TrackErrorKm()
engaged_count = track_info.GetEngagedCount()
if ((engaged_count < 1) and (track_error_km < 2) and (not is_destroyed)):
UI.SetTarget(track_id)
launcher_info = UI.GetBestLauncher()
launcher_idx = launcher_info.Launcher
#UI.DisplayMessage('Best launcher %d' % launcher_idx)
if (launcher_idx != -1):
target_range = UI.GetRangeToTarget()
launch_range = launcher_info.Range_km # reference max range, use for launch decision
is_better = (target_range <= launch_range) and (engaged_count < best_engaged_count)
is_better = is_better or ((engaged_count == best_engaged_count) and (target_range < best_range))
if (is_better):
best_range = target_range
best_target = track_id
best_launcher = launcher_idx
best_engaged_count = engaged_count
UI.SetTarget(best_target)
return (best_target, best_launcher)
# @return True if a sonobuoy track is within a box of half-side range_km
def IsSonobuoyWithin(UI, range_km):
track_list = UI.GetTrackList(0x0084, range_km, 1)
return (track_list.Size() > 0)
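# Illustrative helper, a sketch rather than part of the original script: the
# sonobuoy drop loop repeated in states 3 and 5 of ASWPatrolArea and
# ASWPatrolAreaFW, factored into one function. It uses only calls already
# present in this file.
def DropSonobuoyAtDatum(UI, lon_rad, lat_rad, buoy_spacing_km):
    # Returns 1 if a buoy was launched, 0 if one is already nearby or no
    # sonobuoy launcher (LaunchMode 11) is available.
    if (IsSonobuoyWithin(UI, buoy_spacing_km)):
        return 0
    nLaunchers = UI.GetLauncherCount()
    for n in range(0, nLaunchers):
        launcher_info = UI.GetLauncherInfo(n)
        if (launcher_info.LaunchMode == 11): # sonobuoy
            UI.SendDatumToLauncher(lon_rad, lat_rad, 0, n)
            UI.Launch(n, 1)
            return 1
    return 0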
|
|
#!/usr/bin/env python
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from structdata import g_project
from structdata import Action
from eventeditor import EventEditor
class EditorButton(QPushButton):
entries = None
def __init__(self, item=None, parent=None):
super(EditorButton, self).__init__(parent)
self.item = item
self.createButton()
def createButton(self):
if self.item is not None:
self.setText(self.calcText(self.item))
else:
self.setText("+")
menu = QMenu(self)
for entry in self.entries:
menu.addAction(entry)
self.setMenu(menu)
def calcText(self, item):
raise NotImplementedError
class ActionButton(EditorButton):
entries = ["ROOM_GOTO", "ITEM_MOVE", "VAR_SET"]
def calcText(self, action):
line = action.id
for param in action.params:
line = "%s %s" % (line, param.value)
return line
class RequirementButton(EditorButton):
entries = ["ITEM_REQ", "VAR_REQ"]
def calcText(self, requirement):
line = "%s %s %s" % (requirement.tag_name.upper(),
requirement.id, requirement.value)
return line
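# Illustrative sketch, not part of the original module: EditorButton is meant
# to be subclassed by providing an `entries` list (the choices shown in the
# "+" menu) and a calcText() implementation (the label for an existing item),
# exactly as ActionButton and RequirementButton do above. The entry name and
# item attributes used here are hypothetical.
class ExampleDialogueButton(EditorButton):
    entries = ["DIALOGUE_SET"]
    def calcText(self, dialogue):
        return "%s %s" % (dialogue.tag_name.upper(), dialogue.id)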
class MinusButton(QPushButton):
def sizeHint(self):
return QSize(30, 30)
def __init__(self, item, parent=None):
super(MinusButton, self).__init__(parent)
self.item = item
self.setText("-")
class AreaEditor(QDialog):
"""
classe che permette di modificare l'evento associato ad un area
"""
def __init__(self, area, parent=None):
super(AreaEditor, self).__init__(parent)
g_project.subscribe(self)
self.area = area
self.vl = QVBoxLayout(self)
self.createInterface()
def updateAreaName(self):
"""
funzione che modifica il nome dell'area che si sta editando
"""
area_name = unicode(self.change_area_name.text())
if self.area.id != area_name:
self.area.setName(area_name)
def updateEventName(self, name):
"""
funzione che cambia il nome dell'evento associato all'area che si sta
editando
"""
event_name = unicode(self.change_event_name.text())
if self.area.event != event_name:
g_project.changeEventName(self.area.event, event_name)
self.area.event = event_name
def closeEvent(self, event=None):
g_project.unsubscribe(self)
def resetAreaEditor(self):
children = self.findChildren(QWidget)
for child in children:
if child.parent() == self and not isinstance(child, EventEditor):
child.setParent(None)
def updateData(self):
self.resetAreaEditor()
self.createInterface()
def createInterface(self):
self.gl = QGridLayout()
self.createList()
self.vl.addLayout(self.gl)
gl = QGridLayout()
gl.addWidget(QLabel("Area name"), 0, 0)
self.change_area_name = QLineEdit()
self.change_area_name.setText(self.area.id)
gl.addWidget(self.change_area_name, 0, 1)
gl.addWidget(QLabel("Event name"), 1, 0)
self.change_event_name = QLineEdit()
self.change_event_name.setText(self.area.event)
gl.addWidget(self.change_event_name, 1, 1)
self.vl.addLayout(gl)
self.vl.addStretch()
self.vl.addWidget(QStatusBar(self))
self.connect(self.change_area_name,
SIGNAL("textEdited(const QString &)"),
self.updateAreaName)
self.connect(self.change_event_name,
SIGNAL("textEdited(const QString &)"),
self.updateEventName)
def createList(self):
"""
funzione che crea la lista dei bottoni per l'agigunta e la modifica
di action e requirement
"""
self.signal_minus_mapper = QSignalMapper(self)
self.minus_buttons = []
if self.area.event in g_project.data['events'].keys():
self.event = g_project.data['events'][self.area.event]
self.gl.addWidget(QLabel("Actions", parent=self), 0, 0)
row, i = self.createButtons(ActionButton, self.event.actions, 1, 0)
self.gl.addWidget(QLabel("Requirements", parent=self), row, 0)
row += 1
row, i = self.createButtons(RequirementButton, self.event.requirements,
row, i)
self.connect(self.signal_minus_mapper, SIGNAL("mapped(int)"),
self.removeElement)
def createButtons(self, button_type, items, row_start, mapper_start_index):
"""
funzione che crea tutti i bottoni per particolare insieme di dati.
I parametri necessari sono il tipo di bottone che deve essere creato
(button_type), l'elenco degli elementi da associare ai bottoni (items),
la riga di partenza in cui inserire il bottone e l'indice da
associare al QSignalMapper.
La funzione ritorna la riga a cui e' arrivato nella costruzione e
l'indice a cui e' arrivato il QSignalMapper
"""
row = row_start
i = mapper_start_index
for item in items:
button = button_type(item, self)
button.setAutoDefault(False)
minus_button = MinusButton(item)
minus_button.setAutoDefault(False)
self.minus_buttons.append(minus_button)
self.gl.addWidget(minus_button, row , 1)
self.gl.addWidget(button, row, 0)
self.connect(minus_button, SIGNAL("clicked()"),
self.signal_minus_mapper, SLOT("map()"))
self.signal_minus_mapper.setMapping(minus_button, i)
self.connect(button, SIGNAL("clicked()"),
self.editEvent)
row += 1
i += 1
self.add_button = button_type(parent=self)
self.add_button.setAutoDefault(False)
self.gl.addWidget(self.add_button, row, 0)
row += 1
self.connect(self.add_button.menu(), SIGNAL("triggered(QAction *)"),
self.createNewEvent)
return row, i
def editEvent(self):
self.createEventEditor(item=self.sender().item)
def createNewEvent(self, act):
self.createEventEditor(tag=act.text())
def createEventEditor(self, tag=None, item=None):
"""
funzione che crea un EventEditor relativo all'evento associato
all'area
"""
self.event_editor = EventEditor(self.event, item, tag, self)
self.event_editor.move(self.x() + self.width() + 50, self.y())
self.event_editor.setModal(True)
self.event_editor.show()
def removeElement(self, index):
"""
funzione che elimina un action o un requirement dall'event associato
all'area. La funzione prende in ingresso l'indice relativo
al bottone selezionato
"""
item = self.minus_buttons[index].item
if isinstance(item, Action):
index = self.event.actions.index(item)
g_project.data['events'][self.area.event].actions.pop(index)
else:
index = self.event.requirements.index(item)
g_project.data['events'][self.area.event].requirements.pop(index)
g_project.notify()
if __name__ == "__main__":
from os.path import split
from openfilerooms import openFileRooms
from savefilerooms import saveFileRooms
from utils import g_ptransform
openFileRooms('../examples/example5/world.rooms')
g_ptransform.path_file = split('../examples/example5/world.rooms')[0]
app = QApplication([])
room = g_project.data['rooms']['Stanza Stella']
area = AreaEditor(room.areas[1])
area.show()
area.move(200, 200)
app.exec_()
saveFileRooms("a.rooms")
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PublicIPPrefixesOperations(object):
"""PublicIPPrefixesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the PublicIpPrefix.
:type public_ip_prefix_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
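    # Note on the pattern above (descriptive comment added for clarity):
    # begin_delete() wraps the synchronous _delete_initial() call in an
    # LROPoller. The polling keyword selects the strategy: True uses
    # ARMPolling with the 'location' final-state option, False uses NoPolling,
    # and any other value is treated as a user-supplied PollingMethod. A saved
    # continuation_token lets a poller be rebuilt without re-sending the
    # initial request. begin_create_or_update() below follows the same shape.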
def get(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPPrefix"
"""Gets the specified public IP prefix in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.PublicIPPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
parameters, # type: "_models.PublicIPPrefix"
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPPrefix"
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPPrefix')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
parameters, # type: "_models.PublicIPPrefix"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PublicIPPrefix"]
"""Creates or updates a static or dynamic public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to the create or update public IP prefix operation.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.PublicIPPrefix
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PublicIPPrefix or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.PublicIPPrefix]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPPrefix"
"""Updates public IP prefix tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to update public IP prefix tags.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.PublicIPPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPPrefixListResult"]
"""Gets all the public IP prefixes in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPPrefixListResult"]
"""Gets all public IP prefixes in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
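# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated operations class). Assuming a
# NetworkManagementClient has been constructed elsewhere with valid credentials
# and a subscription id, the pagers returned by list_all()/list() are consumed
# by plain iteration; ItemPaged follows next_link transparently. The names
# below are illustrative, not taken from this file.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for prefix in client.public_ip_prefixes.list_all():
#         print(prefix.name)
#     for prefix in client.public_ip_prefixes.list("<resource-group-name>"):
#         print(prefix.name)
# ---------------------------------------------------------------------------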
|
|
# Copyright 2015 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from oslo_log import log as logging
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions
import testtools
from testtools import testcase as tc
from manila_tempest_tests.common import constants
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.scenario import manager_share as manager
from manila_tempest_tests import utils
CONF = config.CONF
LOG = logging.getLogger(__name__)
@ddt.ddt
class ShareBasicOpsBase(manager.ShareScenarioTest):
"""This smoke test case follows this basic set of operations:
* Create share network
* Create share
* Launch an instance
* Allow access
* Perform ssh to instance
* Mount share
* Terminate the instance
"""
protocol = None
def setUp(self):
super(ShareBasicOpsBase, self).setUp()
base.verify_test_has_appropriate_tags(self)
self.image_ref = None
# Set up the image and flavor for the test instance
# Support both configured and injected values
self.floatings = {}
if self.protocol not in CONF.share.enable_protocols:
message = "%s tests are disabled" % self.protocol
raise self.skipException(message)
if self.protocol not in CONF.share.enable_ip_rules_for_protocols:
message = ("%s tests for access rules other than IP are disabled" %
self.protocol)
raise self.skipException(message)
if not hasattr(self, 'flavor_ref'):
self.flavor_ref = CONF.share.client_vm_flavor_ref
if CONF.share.image_with_share_tools:
images = self.compute_images_client.list_images()["images"]
for img in images:
if img["name"] == CONF.share.image_with_share_tools:
self.image_ref = img['id']
break
if not self.image_ref:
msg = ("Image %s not found" %
CONF.share.image_with_share_tools)
raise exceptions.InvalidConfiguration(message=msg)
self.ssh_user = CONF.share.image_username
LOG.debug('Starting test for i:{image}, f:{flavor}. '
'user: {ssh_user}'.format(
image=self.image_ref, flavor=self.flavor_ref,
ssh_user=self.ssh_user))
self.security_group = self._create_security_group()
self.create_share_network()
def boot_instance(self, wait_until="ACTIVE"):
self.keypair = self.create_keypair()
security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'key_name': self.keypair['name'],
'security_groups': security_groups,
'wait_until': wait_until,
}
if CONF.share.multitenancy_enabled:
create_kwargs['networks'] = [{'uuid': self.net['id']}, ]
instance = self.create_server(
image_id=self.image_ref, flavor=self.flavor_ref, **create_kwargs)
return instance
def init_ssh(self, instance, do_ping=False):
# Obtain a floating IP
floating_ip = (self.compute_floating_ips_client.create_floating_ip()
['floating_ip'])
self.floatings[instance['id']] = floating_ip
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.compute_floating_ips_client.delete_floating_ip,
floating_ip['id'])
# Attach a floating IP
self.compute_floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], instance['id'])
# Check ssh
ssh_client = self.get_remote_client(
server_or_ip=floating_ip['ip'],
username=self.ssh_user,
private_key=self.keypair['private_key'])
# NOTE(u_glide): Workaround for bug #1465682
ssh_client = ssh_client.ssh_client
self.share = self.shares_client.get_share(self.share['id'])
if do_ping:
server_ip = self.share['export_location'].split(":")[0]
ssh_client.exec_command("ping -c 1 %s" % server_ip)
return ssh_client
def mount_share(self, location, ssh_client, target_dir=None):
raise NotImplementedError
def umount_share(self, ssh_client, target_dir=None):
target_dir = target_dir or "/mnt"
ssh_client.exec_command("sudo umount %s" % target_dir)
def write_data(self, data, ssh_client):
ssh_client.exec_command("echo \"%s\" | sudo tee /mnt/t1 && sudo sync" %
data)
def read_data(self, ssh_client):
data = ssh_client.exec_command("sudo cat /mnt/t1")
return data.rstrip()
def migrate_share(self, share_id, dest_host, status, force_host_assisted):
share = self._migrate_share(
share_id, dest_host, status, force_host_assisted,
self.shares_admin_v2_client)
return share
def migration_complete(self, share_id, dest_host):
return self._migration_complete(share_id, dest_host)
def create_share_network(self):
self.net = self._create_network(namestart="manila-share")
self.subnet = self._create_subnet(network=self.net,
namestart="manila-share-sub")
router = self._get_router()
self._create_router_interface(subnet_id=self.subnet['id'],
router_id=router['id'])
self.share_net = self._create_share_network(
neutron_net_id=self.net['id'],
neutron_subnet_id=self.subnet['id'],
name=data_utils.rand_name("sn-name"))
def _get_share_type(self):
if CONF.share.default_share_type_name:
return self.shares_client.get_share_type(
CONF.share.default_share_type_name)['share_type']
return self._create_share_type(
data_utils.rand_name("share_type"),
extra_specs={
'snapshot_support': CONF.share.capability_snapshot_support,
'driver_handles_share_servers': CONF.share.multitenancy_enabled
},)['share_type']
def create_share(self, **kwargs):
kwargs.update({
'share_protocol': self.protocol,
})
if not ('share_type_id' in kwargs or 'snapshot_id' in kwargs):
kwargs.update({'share_type_id': self._get_share_type()['id']})
if CONF.share.multitenancy_enabled:
kwargs.update({'share_network_id': self.share_net['id']})
self.share = self._create_share(**kwargs)
return self.share
def allow_access_ip(self, share_id, ip=None, instance=None, cleanup=True,
snapshot=None):
if instance and not ip:
try:
net_addresses = instance['addresses']
first_address = list(net_addresses.values())[0][0]
ip = first_address['addr']
except Exception:
LOG.debug("Instance: %s" % instance)
# In case of an error, ip will still be None
LOG.exception("Instance does not have a valid IP address. "
              "Falling back to default.")
if not ip:
ip = '0.0.0.0/0'
if snapshot:
self._allow_access_snapshot(snapshot['id'], access_type='ip',
access_to=ip, cleanup=cleanup)
else:
self._allow_access(share_id, access_type='ip', access_to=ip,
cleanup=cleanup)
def provide_access_to_auxiliary_instance(self, instance, share=None,
snapshot=None):
share = share or self.share
if self.protocol.lower() == 'cifs':
self.allow_access_ip(
share['id'], instance=instance, cleanup=False,
snapshot=snapshot)
elif not CONF.share.multitenancy_enabled:
self.allow_access_ip(
share['id'], ip=self.floatings[instance['id']]['ip'],
instance=instance, cleanup=False, snapshot=snapshot)
elif (CONF.share.multitenancy_enabled and
self.protocol.lower() == 'nfs'):
self.allow_access_ip(
share['id'], instance=instance, cleanup=False,
snapshot=snapshot)
def wait_for_active_instance(self, instance_id):
waiters.wait_for_server_status(
self.manager.servers_client, instance_id, "ACTIVE")
return self.manager.servers_client.show_server(instance_id)["server"]
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
def test_mount_share_one_vm(self):
instance = self.boot_instance(wait_until="BUILD")
self.create_share()
instance = self.wait_for_active_instance(instance["id"])
ssh_client = self.init_ssh(instance)
self.provide_access_to_auxiliary_instance(instance)
if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
locations = self.share['export_locations']
else:
exports = self.shares_v2_client.list_share_export_locations(
self.share['id'])
locations = [x['path'] for x in exports]
for location in locations:
self.mount_share(location, ssh_client)
self.umount_share(ssh_client)
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
def test_read_write_two_vms(self):
"""Boots two vms and writes/reads data on it."""
test_data = "Some test data to write"
# Boot two VMs and create share
instance1 = self.boot_instance(wait_until="BUILD")
instance2 = self.boot_instance(wait_until="BUILD")
self.create_share()
instance1 = self.wait_for_active_instance(instance1["id"])
instance2 = self.wait_for_active_instance(instance2["id"])
# Write data to first VM
ssh_client_inst1 = self.init_ssh(instance1)
self.provide_access_to_auxiliary_instance(instance1)
if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
locations = self.share['export_locations']
else:
exports = self.shares_v2_client.list_share_export_locations(
self.share['id'])
locations = [x['path'] for x in exports]
self.mount_share(locations[0], ssh_client_inst1)
self.addCleanup(self.umount_share,
ssh_client_inst1)
self.write_data(test_data, ssh_client_inst1)
# Read from second VM
ssh_client_inst2 = self.init_ssh(instance2)
self.provide_access_to_auxiliary_instance(instance2)
self.mount_share(locations[0], ssh_client_inst2)
self.addCleanup(self.umount_share,
ssh_client_inst2)
data = self.read_data(ssh_client_inst2)
self.assertEqual(test_data, data)
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_host_assisted_migration_tests or
CONF.share.run_driver_assisted_migration_tests,
"Share migration tests are disabled.")
@ddt.data(True, False)
def test_migration_files(self, force_host_assisted):
if (force_host_assisted and
not CONF.share.run_host_assisted_migration_tests):
raise self.skipException("Host-assisted migration tests are "
"disabled.")
elif (not force_host_assisted and
not CONF.share.run_driver_assisted_migration_tests):
raise self.skipException("Driver-assisted migration tests are "
"disabled.")
if self.protocol != "nfs":
raise self.skipException("Only NFS protocol supported "
"at this moment.")
pools = self.shares_admin_v2_client.list_pools(detail=True)['pools']
if len(pools) < 2:
raise self.skipException("At least two different pool entries are "
"needed to run share migration tests.")
instance = self.boot_instance(wait_until="BUILD")
self.create_share()
instance = self.wait_for_active_instance(instance["id"])
self.share = self.shares_admin_v2_client.get_share(self.share['id'])
default_type = self.shares_v2_client.list_share_types(
default=True)['share_type']
dest_pool = utils.choose_matching_backend(
self.share, pools, default_type)
self.assertIsNotNone(dest_pool)
self.assertIsNotNone(dest_pool.get('name'))
dest_pool = dest_pool['name']
ssh_client = self.init_ssh(instance)
self.provide_access_to_auxiliary_instance(instance)
exports = self.shares_v2_client.list_share_export_locations(
self.share['id'])
self.assertNotEmpty(exports)
exports = [x['path'] for x in exports]
self.assertNotEmpty(exports)
self.mount_share(exports[0], ssh_client)
ssh_client.exec_command("sudo mkdir -p /mnt/f1")
ssh_client.exec_command("sudo mkdir -p /mnt/f2")
ssh_client.exec_command("sudo mkdir -p /mnt/f3")
ssh_client.exec_command("sudo mkdir -p /mnt/f4")
ssh_client.exec_command("sudo mkdir -p /mnt/f1/ff1")
ssh_client.exec_command("sleep 1")
ssh_client.exec_command(
"sudo dd if=/dev/zero of=/mnt/f1/1m1.bin bs=1M count=1")
ssh_client.exec_command(
"sudo dd if=/dev/zero of=/mnt/f2/1m2.bin bs=1M count=1")
ssh_client.exec_command(
"sudo dd if=/dev/zero of=/mnt/f3/1m3.bin bs=1M count=1")
ssh_client.exec_command(
"sudo dd if=/dev/zero of=/mnt/f4/1m4.bin bs=1M count=1")
ssh_client.exec_command(
"sudo dd if=/dev/zero of=/mnt/f1/ff1/1m5.bin bs=1M count=1")
ssh_client.exec_command("sudo chmod -R 555 /mnt/f3")
ssh_client.exec_command("sudo chmod -R 777 /mnt/f4")
task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED
if force_host_assisted
else constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
self.share = self.migrate_share(
self.share['id'], dest_pool, task_state, force_host_assisted)
if force_host_assisted:
self.assertRaises(
exceptions.SSHExecCommandFailed,
ssh_client.exec_command,
"dd if=/dev/zero of=/mnt/f1/1m6.bin bs=1M count=1")
self.umount_share(ssh_client)
self.share = self.migration_complete(self.share['id'], dest_pool)
new_exports = self.shares_v2_client.list_share_export_locations(
self.share['id'])
self.assertNotEmpty(new_exports)
new_exports = [x['path'] for x in new_exports]
self.assertNotEmpty(new_exports)
self.assertEqual(dest_pool, self.share['host'])
self.assertEqual(constants.TASK_STATE_MIGRATION_SUCCESS,
self.share['task_state'])
self.mount_share(new_exports[0], ssh_client)
output = ssh_client.exec_command("ls -lRA --ignore=lost+found /mnt")
self.umount_share(ssh_client)
self.assertIn('1m1.bin', output)
self.assertIn('1m2.bin', output)
self.assertIn('1m3.bin', output)
self.assertIn('1m4.bin', output)
self.assertIn('1m5.bin', output)
def _get_user_export_location(self, share=None, snapshot=None):
user_export_location = None
if share:
if utils.is_microversion_lt(
CONF.share.max_api_microversion, "2.9"):
user_export_location = share['export_locations'][0]
else:
exports = self.shares_v2_client.list_share_export_locations(
share['id'])
locations = [x['path'] for x in exports]
user_export_location = locations[0]
elif snapshot:
exports = (self.shares_v2_client.
list_snapshot_export_locations(snapshot['id']))
locations = [x['path'] for x in exports]
user_export_location = locations[0]
self.assertIsNotNone(user_export_location)
return user_export_location
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@testtools.skipUnless(
CONF.share.run_snapshot_tests, "Snapshot tests are disabled.")
def test_write_data_to_share_created_from_snapshot(self):
if self.protocol.upper() == 'CIFS':
msg = "Skipped for CIFS protocol because of bug/1649573"
raise self.skipException(msg)
# 1 - Create UVM, ok, created
instance = self.boot_instance(wait_until="BUILD")
# 2 - Create share S1, ok, created
parent_share = self.create_share()
instance = self.wait_for_active_instance(instance["id"])
self.addCleanup(self.servers_client.delete_server, instance['id'])
# 3 - SSH to UVM, ok, connected
ssh_client = self.init_ssh(instance)
# 4 - Provide RW access to S1, ok, provided
self.provide_access_to_auxiliary_instance(instance, parent_share)
# 5 - Try mount S1 to UVM, ok, mounted
user_export_location = self._get_user_export_location(parent_share)
parent_share_dir = "/mnt/parent"
ssh_client.exec_command("sudo mkdir -p %s" % parent_share_dir)
self.mount_share(user_export_location, ssh_client, parent_share_dir)
self.addCleanup(self.umount_share, ssh_client, parent_share_dir)
# 6 - Create "file1", ok, created
ssh_client.exec_command("sudo touch %s/file1" % parent_share_dir)
# 7 - Create snapshot SS1 from S1, ok, created
snapshot = self._create_snapshot(parent_share['id'])
# 8 - Create "file2" in share S1 - ok, created. We expect that
# snapshot will not contain any data created after snapshot creation.
ssh_client.exec_command("sudo touch %s/file2" % parent_share_dir)
# 9 - Create share S2 from SS1, ok, created
child_share = self.create_share(snapshot_id=snapshot["id"])
# 10 - Try mount S2 - fail, access denied. We test that child share
# did not get access rules from parent share.
user_export_location = self._get_user_export_location(child_share)
child_share_dir = "/mnt/child"
ssh_client.exec_command("sudo mkdir -p %s" % child_share_dir)
self.assertRaises(
exceptions.SSHExecCommandFailed,
self.mount_share,
user_export_location, ssh_client, child_share_dir,
)
# 11 - Provide RW access to S2, ok, provided
self.provide_access_to_auxiliary_instance(instance, child_share)
# 12 - Try mount S2, ok, mounted
self.mount_share(user_export_location, ssh_client, child_share_dir)
self.addCleanup(self.umount_share, ssh_client, child_share_dir)
# 13 - List files on S2, only "file1" exists
output = ssh_client.exec_command("sudo ls -lRA %s" % child_share_dir)
self.assertIn('file1', output)
self.assertNotIn('file2', output)
# 14 - Create file3 on S2, ok, file created
ssh_client.exec_command("sudo touch %s/file3" % child_share_dir)
# 15 - List files on S1, two files exist - "file1" and "file2"
output = ssh_client.exec_command("sudo ls -lRA %s" % parent_share_dir)
self.assertIn('file1', output)
self.assertIn('file2', output)
self.assertNotIn('file3', output)
# 16 - List files on S2, two files exist - "file1" and "file3"
output = ssh_client.exec_command("sudo ls -lRA %s" % child_share_dir)
self.assertIn('file1', output)
self.assertNotIn('file2', output)
self.assertIn('file3', output)
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.32")
@testtools.skipUnless(CONF.share.run_mount_snapshot_tests,
'Mountable snapshots tests are disabled.')
@testtools.skipUnless(CONF.share.run_snapshot_tests,
"Snapshot tests are disabled.")
def test_read_mountable_snapshot(self):
if self.protocol.upper() == 'CIFS':
msg = "Skipped for CIFS protocol because of bug/1649573"
raise self.skipException(msg)
# 1 - Create UVM, ok, created
instance = self.boot_instance(wait_until="BUILD")
# 2 - Create share S1, ok, created
parent_share = self.create_share()
instance = self.wait_for_active_instance(instance["id"])
self.addCleanup(self.servers_client.delete_server, instance['id'])
# 3 - SSH to UVM, ok, connected
ssh_client = self.init_ssh(instance)
# 4 - Provide RW access to S1, ok, provided
self.provide_access_to_auxiliary_instance(instance, parent_share)
# 5 - Try mount S1 to UVM, ok, mounted
user_export_location = self._get_user_export_location(parent_share)
parent_share_dir = "/mnt/parent"
snapshot_dir = "/mnt/snapshot_dir"
ssh_client.exec_command("sudo mkdir -p %s" % parent_share_dir)
ssh_client.exec_command("sudo mkdir -p %s" % snapshot_dir)
self.mount_share(user_export_location, ssh_client, parent_share_dir)
self.addCleanup(self.umount_share, ssh_client, parent_share_dir)
# 6 - Create "file1", ok, created
ssh_client.exec_command("sudo touch %s/file1" % parent_share_dir)
# 7 - Create snapshot SS1 from S1, ok, created
snapshot = self._create_snapshot(parent_share['id'])
# 8 - Create "file2" in share S1 - ok, created. We expect that
# snapshot will not contain any data created after snapshot creation.
ssh_client.exec_command("sudo touch %s/file2" % parent_share_dir)
# 9 - Allow access to SS1
self.provide_access_to_auxiliary_instance(instance, snapshot=snapshot)
# 10 - Mount SS1
user_export_location = self._get_user_export_location(
snapshot=snapshot)
self.mount_share(user_export_location, ssh_client, snapshot_dir)
self.addCleanup(self.umount_share, ssh_client, snapshot_dir)
# 11 - List files on SS1, only "file1" exists
# NOTE(lseki): using ls without recursion to avoid permission denied
# error while listing lost+found directory on LVM volumes
output = ssh_client.exec_command("sudo ls -lA %s" % snapshot_dir)
self.assertIn('file1', output)
self.assertNotIn('file2', output)
# 12 - Try to create a file on SS1, should fail
self.assertRaises(
exceptions.SSHExecCommandFailed,
ssh_client.exec_command,
"sudo touch %s/file3" % snapshot_dir)
class TestShareBasicOpsNFS(ShareBasicOpsBase):
protocol = "nfs"
def mount_share(self, location, ssh_client, target_dir=None):
target_dir = target_dir or "/mnt"
ssh_client.exec_command(
"sudo mount -vt nfs \"%s\" %s" % (location, target_dir))
class TestShareBasicOpsCIFS(ShareBasicOpsBase):
protocol = "cifs"
def mount_share(self, location, ssh_client, target_dir=None):
location = location.replace("\\", "/")
target_dir = target_dir or "/mnt"
ssh_client.exec_command(
"sudo mount.cifs \"%s\" %s -o guest" % (location, target_dir)
)
# NOTE(u_glide): this function is required to exclude ShareBasicOpsBase from
# executed test cases.
# See: https://docs.python.org/2/library/unittest.html#load-tests-protocol
# for details.
def load_tests(loader, tests, _):
result = []
for test_case in tests:
if type(test_case._tests[0]) is ShareBasicOpsBase:
continue
result.append(test_case)
return loader.suiteClass(result)
|
|
from flask import render_template, flash, redirect, session, url_for, request, g, jsonify
from flask_login import login_user, logout_user, current_user, login_required
from sqlalchemy.orm.exc import NoResultFound
from app import app, db, lm
from app.service import factory as serviceFactory
from forms import LoginForm, RegisterForm, ApplicationForm, ServiceForm, WatcherForm, RepositoryForm, TokenForm
from models import User, Application, Service, Watcher, Repository, Token
from datetime import datetime
from config import POSTS_PER_PAGE
import json
pages = [
{
'url': 'applications',
'title': 'Applications'
},
{
'url': 'services',
'title': 'Services'
},
{
'url': 'watchers',
'title': 'Watchers'
},
{
'url': 'repositories',
'title': 'Repositories'
},
{
'url': 'tokens',
'title': 'Tokens'
}
]
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@app.route('/')
@app.route('/<string:watch>')
@app.route('/<string:watch>/<int:page>')
@app.route('/<string:watch>/<int:page>/<int:count>')
def index(watch = None, page=1, count = POSTS_PER_PAGE):
pages = Repository.query.filter(Repository.watcher.has(name = watch)).\
order_by(Repository.create.desc()).\
paginate(page, count)
def decode_entity(entity):
    try:
        return json.loads(entity)
    except (ValueError, TypeError):
        return {}
return jsonify({
'page': str(pages.page or 1),
'prev': str(pages.prev_num),
'next': str(pages.next_num),
'total_pages': str(pages.pages),
'per_page': str(pages.per_page),
'result': [{
'id': r.id,
'key': r.key,
'create': '' if r.create is None else r.create.strftime("%Y-%m-%d %H:%M:%S"),
'add': '' if r.add is None else r.add.strftime("%Y-%m-%d %H:%M:%S"),
'push': '' if r.push is None else r.push.strftime("%Y-%m-%d %H:%M:%S"),
'text': r.text,
'entities': decode_entity(r.text_raw)
} for r in pages.items]})
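# Illustrative shape of the JSON returned by index() above (all values are
# placeholders, not real data; per_page comes from POSTS_PER_PAGE):
#
#   {"page": "1", "prev": "None", "next": "2", "total_pages": "3",
#    "per_page": "<POSTS_PER_PAGE>",
#    "result": [{"id": 1, "key": "<key>", "create": "2020-01-01 00:00:00",
#                "add": "", "push": "", "text": "<text>", "entities": {}}]}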
@app.route('/admin')
@app.route('/admin/index')
@login_required
def admin():
return render_template('index.html', title = 'Home', pages = pages)
@app.route('/tokens/<string:token>')
def tokens_add(token):
if token is None or len(token) != 64:
    return jsonify(error = 'token invalid')
# Query.first() returns None when no row matches, so a NoResultFound
# handler is not needed here.
f = Token.query.filter(Token.token == token).first()
if f is not None:
    return jsonify(error = 'token exists')
token = Token(token = token, enabled = True)
db.session.add(token)
db.session.commit()
return jsonify(ok='ok')
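# Illustrative usage of the endpoint above (the host is a placeholder):
#   GET http://<host>/tokens/<64-character-token>
# responds with {"ok": "ok"} on success, or {"error": "token invalid"} /
# {"error": "token exists"} otherwise.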
@app.route('/admin/tokens', methods = ['GET', 'POST'])
@app.route('/admin/tokens/<int:page>', methods = ['GET', 'POST'])
@login_required
def tokens(page=1):
form = TokenForm()
if request.method == 'POST':
if form.delete.validate(form):
try:
    deleteId = int(form.delete.data)
except ValueError:
    deleteId = 0
if deleteId > 0:
token = db.session.query(Token).get(deleteId)
if token is None:
flash('Cannot delete token: ' + str(deleteId))
else:
db.session.delete(token)
db.session.commit()
return redirect(url_for('tokens', page = page))
if form.validate():
try:
    editId = int(form.id.data)
except ValueError:
    editId = 0
if editId > 0:
token = db.session.query(Token).get(editId)
if token is None:
flash('Cannot edit token: ' + str(editId))
else:
token.token = form.token.data
token.enabled = form.enabled.data
db.session.commit()
return redirect(url_for('tokens', page = page))
token = Token(token = form.token.data, enabled = form.enabled.data)
db.session.add(token)
db.session.commit()
flash('Your token is now live!')
return redirect(url_for('tokens', page = page))
else:
flash('No data')
return redirect(url_for('tokens', page = page))
tokens = Token.query.order_by('id').paginate(page, POSTS_PER_PAGE, False)
return render_template('list.html', pages = pages, form = form, elements = tokens, fields = ['id', 'token', 'enabled'], title = 'Token', model = 'token', route = 'tokens')
@app.route('/admin/applications', methods = ['GET', 'POST'])
@app.route('/admin/applications/<int:page>', methods = ['GET', 'POST'])
@login_required
def applications(page=1):
form = ApplicationForm()
if request.method == 'POST':
if form.delete.validate(form):
try:
    deleteId = int(form.delete.data)
except ValueError:
    deleteId = 0
if deleteId > 0:
application = db.session.query(Application).get(deleteId)
if application is None:
flash('Cannot delete application: ' + str(deleteId))
else:
db.session.delete(application)
db.session.commit()
return redirect(url_for('applications', page = page))
if form.validate():
try:
    editId = int(form.id.data)
except ValueError:
    editId = 0
if editId > 0:
application = db.session.query(Application).get(editId)
if application is None:
flash('Cannot edit application: ' + str(editId))
else:
application.name = form.name.data
application.params = form.params.data
db.session.commit()
return redirect(url_for('applications', page = page))
application = Application(name = form.name.data, params = form.params.data)
db.session.add(application)
db.session.commit()
flash('Your application is now live!')
return redirect(url_for('applications', page = page))
else:
flash('No data')
return redirect(url_for('applications', page = page))
applications = Application.query.order_by('id').paginate(page, POSTS_PER_PAGE, False)
return render_template('list.html', pages = pages, form = form, elements = applications, fields = ['id', 'name', 'params', 'status'], title = 'Application', model = 'application', route = 'applications')
@app.route('/admin/services', methods = ['GET', 'POST'])
@app.route('/admin/services/<int:page>', methods = ['GET', 'POST'])
@login_required
def services(page=1):
form = ServiceForm()
if request.method == 'POST':
if form.delete.validate(form):
try:
    deleteId = int(form.delete.data)
except ValueError:
    deleteId = 0
if deleteId > 0:
service = db.session.query(Service).get(deleteId)
if service is None:
flash('Cannot delete service: ' + str(deleteId))
else:
db.session.delete(service)
db.session.commit()
return redirect(url_for('services', page = page))
if form.validate():
try:
    editId = int(form.id.data)
except ValueError:
    editId = 0
if editId > 0:
service = db.session.query(Service).get(editId)
if service is None:
flash('Cannot edit service: ' + str(editId))
else:
service.api = form.api.data
service.params = form.params.data
db.session.commit()
return redirect(url_for('services', page = page))
service = Service(api = form.api.data, params = form.params.data)
db.session.add(service)
db.session.commit()
flash('Your service is now live!')
return redirect(url_for('services', page = page))
else:
flash('No data')
return redirect(url_for('services', page = page))
services = Service.query.order_by('id').paginate(page, POSTS_PER_PAGE, False)
return render_template('list.html', pages = pages, form = form, elements = services, fields = ['id', 'api', 'params', 'status'], title = 'Service', model = 'service', route = 'services')
@app.route('/admin/watchers', methods = ['GET', 'POST'])
@app.route('/admin/watchers/<int:page>', methods = ['GET', 'POST'])
@login_required
def watchers(page=1):
form = WatcherForm()
form.application.choices = [(a.id, a.name) for a in Application.query.order_by('id')]
form.service.choices = [(s.id, s.api) for s in Service.query.order_by('id')]
if request.method == 'POST':
if form.delete.validate(form):
try:
    deleteId = int(form.delete.data)
except ValueError:
    deleteId = 0
if deleteId > 0:
watcher = db.session.query(Watcher).get(deleteId)
if watcher is None:
flash('Cannot delete watcher: ' + str(deleteId))
else:
db.session.delete(watcher)
db.session.commit()
return redirect(url_for('watchers', page = page))
if form.validate():
try:
    editId = int(form.id.data)
except ValueError:
    editId = 0
if editId > 0:
watcher = db.session.query(Watcher).get(editId)
if watcher is None:
flash('Cannot edit watcher: ' + str(editId))
else:
watcher.name = form.name.data
watcher.repository = form.repository.data
watcher.push = form.push.data
watcher.application_id = form.application.data
watcher.service_id = form.service.data
watcher.params = form.params.data
watcher.push_params = form.push_params.data
db.session.commit()
return redirect(url_for('watchers', page = page))
watcher = Watcher(name = form.name.data, repository = form.repository.data, push = form.push.data, application_id = form.application.data, service_id = form.service.data, params = form.params.data, push_params = form.push_params.data)
db.session.add(watcher)
db.session.commit()
flash('Your watcher is now live!')
return redirect(url_for('watchers', page = page))
else:
flash('No data')
return redirect(url_for('watchers', page = page))
watchers = Watcher.query.order_by('id').paginate(page, POSTS_PER_PAGE, False)
return render_template('list.html', pages = pages, form = form, elements = watchers, fields = ['id', 'name', 'repository', 'push', 'params', 'push_params', 'application', 'service'], title = 'Watcher', model = 'watcher', route = 'watchers')
@app.route('/admin/repositories', methods = ['GET', 'POST'])
@app.route('/admin/repositories/<int:page>', methods = ['GET', 'POST'])
@login_required
def repositories(page=1):
form = RepositoryForm()
form.watcher.choices = [(w.id, w.name) for w in Watcher.query.order_by('id')]
if request.method == 'POST':
if form.delete.validate(form):
try:
    deleteId = int(form.delete.data)
except ValueError:
    deleteId = 0
if deleteId > 0:
repository = db.session.query(Repository).get(deleteId)
if repository is None:
flash('Cannot delete repository: ' + str(deleteId))
else:
db.session.delete(repository)
db.session.commit()
return redirect(url_for('repositories', page = page))
if form.validate():
try:
    editId = int(form.id.data)
except ValueError:
    editId = 0
if editId > 0:
repository = db.session.query(Repository).get(editId)
if repository is None:
flash('Cannot edit repository: ' + str(editId))
else:
repository.watcher_id = form.watcher.data
repository.add = form.add.data
repository.push = form.push.data
repository.text_raw = form.text_raw.data
repository.text = form.text.data
db.session.commit()
return redirect(url_for('repositories', page = page))
repository = Repository(watcher_id = form.watcher.data, add = form.add.data, push = form.push.data, text_raw = form.text_raw.data, text = form.text.data)
db.session.add(repository)
db.session.commit()
flash('Your repository is now live!')
return redirect(url_for('repositories', page = page))
else:
flash('No data')
return redirect(url_for('repositories', page = page))
repositories = Repository.query.order_by('id').paginate(page, POSTS_PER_PAGE, False)
return render_template('list.html', pages = pages, form = form, elements = repositories, fields = ['id', 'key', 'watcher','create','add', 'push', 'text', 'text_raw'], title = 'Repository', model = 'repository', route = 'repositories')
@app.route('/admin/run/<string:model>/<int:id>')
#@login_required
def run(model, id):
service = serviceFactory(model)
if service is not None:
    return jsonify(service.run(id))
return jsonify(error = 'no run with type ' + model + ' found')
'''
@app.route('/admin/register', methods=['GET', 'POST'])
def register():
"""User registration route."""
if current_user.is_authenticated:
return redirect(url_for('admin'))
form = RegisterForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None:
flash('Username already exists.')
return redirect(url_for('register'))
user = User(email=form.email.data, password=form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('admin'))
return render_template('register.html', title = 'Please sign up', form=form)
'''
@app.route('/admin/login', methods = ['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('admin'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None or not user.verify_password(form.password.data):
flash('Invalid username or password.')
return redirect(url_for('login'))
login_user(user)
flash('You are now logged in!')
return redirect(url_for('admin'))
return render_template('login.html', title = 'Please sign in', form = form)
@app.route('/admin/logout')
def logout():
logout_user()
return redirect(url_for('admin'))
|
|
import json
data = [
'abac',
'academ',
'acanth',
'acro',
'actin',
'adeno',
'aero',
'aesth',
'aether',
'olog',
'hellen',
'ether',
'agon',
'gogue',
'agro',
'ailur',
'alcyon',
'aleuro',
'algia',
'algesic',
'allo',
'allel',
'alph',
'alphit',
'amath',
'ambly',
'ammo',
'amnio',
'andr',
'anem',
'anti',
'anth',
'aphrod',
'arachn',
'arch',
'archae',
'arct',
'aret',
'arist',
'arithm',
'arsen',
'arthr',
'arti',
'asco',
'aspr',
'aster',
'astr',
'asthen',
'ather',
'athl',
'aux',
'auto',
'axon',
'bapt',
'baro',
'bary',
'bathy',
'bibl',
'bio',
'blasto',
'blenno',
'botan',
'brachio',
'brachy',
'brady',
'branchi',
'bromat',
'bronch',
'bronto',
'butyr',
'byss',
'caco',
'eccle',
'calyp',
'cardi',
'carp',
'cata',
'cathar',
'caust',
'ceno',
'centr',
'cephal',
'ceram',
'cerat',
'chiro',
'chelon',
'chlor',
'chondr',
'choreo',
'chrom',
'chron',
'chrys',
'cirr',
'clade',
'clado',
'clast',
'clav',
'cleisto',
'cleithr',
'clini',
'cochl',
'coel',
'conic',
'copro',
'corac',
'cosm',
'cosmet',
'cotyl',
'cracy',
'crat',
'crani',
'crepid',
'crico',
'criti',
'crine',
'crypt',
'cryph',
'cten',
'cyan',
'cycl',
'cylind',
'cyn',
'cyst',
'cyt',
'didact',
'dacry',
'dactyl',
'adaman',
'deca',
'delt',
'demo',
'dendr',
'derm',
'despot',
'deuter',
'dexi',
'dia',
'diacosi',
'dino',
'dipl',
'dote',
'dodec',
'dox',
'drama',
'drachm',
'drome',
'droso',
'dryad',
'dyad',
'dyna',
'dys',
'ecc',
'ecto',
'eco',
'eiren',
'electr',
'elem',
'emet',
'enantio',
'encephal',
'endo',
'echo',
'atom',
'engy',
'ennea',
'eos',
'epi',
'ephed',
'ergic',
'urgy',
'ergy',
'energ',
'ergat',
'ergo',
'orgi',
'orgy',
'surgi',
'erot',
'erythr',
'esot',
'ethi',
'etho',
'ethm',
'ethn',
'etym',
'eur',
'exo',
'galact',
'galax',
'lacto',
'gamo',
'gamy',
'gamet',
'gamm',
'gargal',
'gargar',
'gastr',
'geo',
'geiton',
'genesi',
'gephyr',
'geron',
'geri',
'geran',
'geusia',
'glauc',
'glia',
'gloss',
'glot',
'glute',
'glyc',
'glyph',
'gnath',
'gnom',
'gnos',
'gramm',
'graph',
'gryph',
'gymn',
'olymp',
'gyn',
'gyrin',
'hadro',
'aem',
'hemo',
'hali',
'halo',
'hapl',
'hedo',
'heli',
'hemer',
'hemi',
'hendec',
'hept',
'heres',
'heret',
'heur',
'hex',
'hier',
'hipp',
'ode',
'holi',
'holo',
'homo',
'anoma',
'homeo',
'horo',
'hormo',
'hyal',
'hybr',
'hubr',
'hydn',
'hydr',
'hygr',
'hymen',
'hypo',
'hyph',
'hyo',
'hyper',
'hyph',
'hypn',
'hyps',
'hyster',
'hyen',
'iatr',
'ichthy',
'icos',
'idol',
'ideo',
'iso',
'ischi',
'kilo',
'kine',
'cine',
'klept',
'kudo',
'lamp',
'lecith',
'leio',
'lekan',
'lepid',
'lepto',
'lepro',
'lepsi',
'leuk',
'leuc',
'lipo',
'litan',
'lith',
'logy',
'logist',
'logo',
'logic',
'lys',
'lyti',
'macro',
'magnet',
'mania',
'mechan',
'mega',
'meio',
'melan',
'meliss',
'meno',
'mening',
'meso',
'meta',
'meter',
'metr',
'micro',
'mime',
'mimi',
'mint',
'miso',
'misa',
'mono',
'moron',
'moric',
'moni',
'morph',
'myo',
'mys',
'mycet',
'myco',
'mydr',
'amyl',
'myth',
'myri',
'myrmec',
'myx',
'myz',
'narc',
'naut',
'neo',
'necro',
'nect',
'nemat',
'nephr',
'nesia',
'neur',
'nomy',
'nomic',
'nomia',
'nomad',
'noto',
'notho',
'nyct',
'obel',
'obol',
'ocean',
'ochl',
'oct',
'odont',
'dynia',
'dyne',
'oec',
'oeno',
'phag',
'oestr',
'estro',
'ogdo',
'oid',
'oligo',
'oliga',
'oma',
'ombr',
'ommat',
'omphal',
'onisc',
'onco',
'oneir',
'onio',
'onomat',
'onto',
'onych',
'onym',
'ophi',
'ophthalm',
'opisth',
'opia',
'opsy',
'opt',
'opis',
'opson',
'orog',
'orch',
'orches',
'oreg',
'organ',
'ornith',
'orphan',
'ortho',
'oryz',
'osm',
'oste',
'osto',
'ostrac',
'ostre',
'oxy',
'oxi',
'ozo',
'pach',
'pae',
'pedo',
'paed',
'pedi',
'peda',
'palae',
'paleo',
'palin',
'palim',
'pan',
'para',
'parthen',
'path',
'pater',
'patr',
'pect',
'pelit',
'pelag',
'pelarg',
'pomp',
'penia',
'pent',
'pente',
'pepper',
'pept',
'peri',
'persic',
'petr',
'phae',
'phag',
'phalang',
'phalar',
'pharm',
'phan',
'fant',
'pheno',
'phob',
'phor',
'pheug',
'phyg',
'phil',
'phim',
'phleb',
'phleg',
'phlog',
'phon',
'photo',
'phos',
'phrag',
'phren',
'phron',
'phryn',
'phthleg',
'phyc',
'phyl',
'phys',
'phyt',
'piez',
'pino',
'pirat',
'pirac',
'piso',
'pithec',
'placo',
'place',
'plag',
'plan',
'plas',
'plat',
'plec',
'ploc',
'pleg',
'plect',
'plex',
'plesi',
'pleth',
'pleur',
'plinth',
'pluto',
'pneu',
'pnig',
'pnict',
'pod',
'pogon',
'poe',
'poie',
'pole',
'pola',
'poli',
'poly',
'pomph',
'pore',
'pori',
'poro',
'porn',
'porphyr',
'potam',
'prag',
'pras',
'presby',
'prio',
'priap',
'prism',
'pro',
'psa',
'pseph',
'pseud',
'psil',
'psithyr',
'psittac',
'psoph',
'psor',
'psych',
'pter',
'pto',
'pty',
'pyel',
'pyg',
'pyl',
'pyo',
'pyr',
'pyramid',
'pyrrh',
'pyth',
'^tri',
'olympi',
'asthma',
'raph',
'rhabd',
'rhach',
'rach',
'rhag',
'rheg',
'rheo',
'rhythm',
'rhetin',
'rhig',
'rhin',
'rhiz',
'rhod',
'rhomb',
'rhynch',
'spond',
'spor',
'sacchar',
'salping',
'sapphir',
'sapr',
'sarc',
'saur',
'scalen',
'scandal',
'scaph',
'scat',
'sced',
'scel',
'sceni',
'sceno',
'scene',
'skep',
'scop',
'schem',
'schiz',
'schis',
'scler',
'scolec',
'scoli',
'scombr',
'scoto',
'scyph',
'seism',
'selen',
'sema',
'seri',
'sidero',
'sidere',
'sigm',
'sinap',
'sipho',
'parasit',
'smargd',
'smilo',
'solen',
'soma',
'osome',
'somy',
'soph',
'spasm',
'spast',
'spad',
'sporo',
'spore',
'spele',
'spond',
'spelunk',
'sperm',
'sphal',
'sphen',
'spher',
'sphing',
'sphinct',
'sphondyl',
'sphrag',
'sphyg',
'sphyx',
'spleni',
'spleno',
'stat',
'stasi',
'staphyl',
'steat',
'steg',
'steno',
'steri',
'stereo',
'sternum',
'stetho',
'sthen',
'stich',
'stigm',
'stoch',
'stom',
'strom',
'strat',
'astro',
'stroph',
'strept',
'styg',
'styl',
'sybar',
'syco',
'sym',
'syn',
'sys',
'syring',
'tachy',
'tacho',
'taeni',
'taxi',
'taxy',
'taxo',
'tars',
'taur',
'tech',
'criter',
'lesb',
'tekn',
'tele',
'tomy',
'tetart',
'tetr',
'teuch',
'thalam',
'thalass',
'thana',
'thano',
'theo',
'theis',
'thesi',
'theti',
'thema',
'theori',
'thero',
'therap',
'therm',
'thigm',
'thixo',
'thorac',
'thym',
'thyr',
'tonic',
'topi',
'tope',
'toxi',
'trach',
'trag',
'trapez',
'traum',
'treiskaidek',
'trema',
'troch',
'trop',
'tryp',
'tympan',
'type',
'typi',
'typo',
'typh',
'tyrann',
'diur',
'urea',
'uria',
'urem',
'uran',
'xanth',
'xen',
'xero',
'xiph',
'xyl',
'eczem',
'zeal',
'zephyr',
'zed',
'zeta',
'zizyph',
'azo',
'zoo',
'zon',
'zyg',
'zym',
'itis',
'hepat',
'character'
]
with open('greekRootsList.json', 'w') as f:
json.dump(data, f)
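# A minimal sketch of how the generated file might be consumed (an assumption,
# not part of this script; the '^tri' entry suggests the roots are treated as
# regular expressions):
#
#   import json, re
#   with open('greekRootsList.json') as fh:
#       roots = json.load(fh)
#   hits = [r for r in roots if re.search(r, 'photograph')]  # includes 'photo', 'graph'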
|
|
# encoding: utf8
__all__ = ['CalendarFrame']
import calendar
import locale
import tkinter as tk
import tkinter.ttk as ttk
imgp_data = ('R0lGODlhDAAMAIABAAAAAP///yH+EUNyZWF0ZWQgd2l0aCBHSU1QACH5BAEK' +
'AAEALAAAAAAMAAwAAAIVjI+JoMsdgIRyqmoTfrfCmDWh+DUFADs=')
imgn_data = ('R0lGODlhDAAMAIABAAAAAP///yH+EUNyZWF0ZWQgd2l0aCBHSU1QACH5BAEK' +
'AAEALAAAAAAMAAwAAAIUjI8ZoAnczINtUmdrVpu/uFwcSBYAOw==')
def get_calendar(locale, fwday):
# instantiate proper calendar class
if locale is None:
return calendar.TextCalendar(fwday)
else:
return calendar.LocaleTextCalendar(fwday, locale)
def i2rc(i, coldim):
c = i % coldim
f = (i - c) // coldim
return (f, c)
def rowmajor(rows, cols):
size = rows * cols
for i in range(0, size):
c = i % cols
f = (i - c) // cols
yield (i, f, c)
def matrix_coords(rows, cols, rowh, colw, ox=0, oy=0):
"Generate coords for a matrix of rects"
for i, f, c in rowmajor(rows, cols):
x = ox + c * colw
y = oy + f * rowh
x1 = x + colw
y1 = y + rowh
yield (i, x, y, x1, y1)
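# Examples (illustrative): i2rc(9, 7) returns (1, 2); the first tuple yielded
# by matrix_coords(6, 7, rowh=20, colw=30) is (0, 0, 0, 30, 20).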
class CalendarFrame(ttk.Frame):
""" Allows to choose a date in a calendar.
WIDGET-SPECIFIC OPTIONS
locale, firstweekday, year, month
calendarfg, calendarbg,
headerfg, headerbg,
selectbg, selectfg,
markbg, markfg,
Generates:
<<CalendarFrameDateSelected>>
"""
datetime = calendar.datetime.datetime
timedelta = calendar.datetime.timedelta
def __init__(self, master=None, **kw):
self.__redraw_cb = None # For redraw callback check.
self.__markdays_cb = None # For markdays callback check.
sysloc = locale.getlocale(locale.LC_TIME)
if None in sysloc:
sysloc = None
else:
sysloc = '{0}.{1}'.format(*sysloc)
self.__options = options = {
'firstweekday': calendar.SUNDAY,
'year': self.datetime.now().year,
'month': self.datetime.now().month,
'locale': sysloc,
'calendarfg': 'black',
'calendarbg': 'white',
'headerfg': 'black',
'headerbg': 'grey90',
'selectbg': '#8000FF',
'selectfg': 'white',
'state': 'normal',
'markbg': 'white',
'markfg': 'blue',
}
# remove custom options from kw before initializing ttk.Frame
for k, v in options.items():
options[k] = kw.pop(k, v)
# Marked days
self._marked_days = set()
# Calendar variables
self._date = self.datetime(options['year'], options['month'], 1)
self._cal = get_calendar(options['locale'], options['firstweekday'])
self._weeks = self._cal.monthdayscalendar(options['year'],
options['month'])
self._selection = None
# Canvas variables
self._rheader = None
self._theader = [0 for x in range(0, 7)]
self._recmat = [0 for x in rowmajor(6, 7)]
self._txtmat = [0 for x in rowmajor(6, 7)]
# button bar variables
self.__img_prev = None
self.__img_next = None
self._lmonth = None
self._lyear = None
ttk.Frame.__init__(self, master, **kw)
# build ui
self.__build_ui()
def configure(self, cnf=None, **kw):
args = tk._cnfmerge((cnf, kw))
color_change = False
for key in ('calendarfg', 'calendarbg', 'headerfg', 'headerbg',
'selectbg', 'selectfg', 'markbg', 'markfg'):
if key in args:
self.__options[key] = args.pop(key)
color_change = True
key = 'state'
if key in args:
value = args.pop(key)
self.__options[key] = value
self._canvas.config(state=value)
for w in self._topframe.winfo_children():
if w.winfo_class() == 'TButton':
w.config(state=value)
calendar_change = False
key = 'locale'
if key in args:
value = locale.normalize(args.pop(key))
self.__options[key] = value
calendar_change = True
key = 'firstweekday'
if key in args:
value = args.pop(key)
self.__options[key] = int(value)
calendar_change = True
if calendar_change:
self._reconfigure_calendar()
date_change = False
for key in ('year', 'month'):
if key in args:
self.__options[key] = int(args.pop(key))
date_change = True
if date_change:
self._reconfigure_date()
if color_change or calendar_change or date_change:
self._redraw_calendar()
ttk.Frame.configure(self, args)
config = configure
def cget(self, key):
if key in ('locale', 'firstweekday', 'calendarfg', 'calendarbg',
'headerfg', 'headerbg', 'selectbg', 'selectfg',
'markbg', 'markfg', 'state'):
return self.__options[key]
option = 'year'
if key == option:
return self._date.year
option = 'month'
if key == option:
return self._date.month
return ttk.Frame.cget(self, key)
__getitem__ = cget
def __build_ui(self):
# BUILD UI
self.configure(height='200', width='200')
self._topframe = ttk.Frame(self)
self._topframe.configure(height='200', width='200')
self.bpmonth = ttk.Button(self._topframe)
self.bpmonth.configure(style='Toolbutton', text='L')
self.bpmonth.pack(side='left')
self.bnmonth = ttk.Button(self._topframe)
self.bnmonth.configure(style='Toolbutton', text='R')
self.bnmonth.pack(side='left')
self._lmonth = ttk.Label(self._topframe)
self._lmonth.configure(anchor='center', text='January')
self._lmonth.pack(side='left')
self.btoday = ttk.Button(self._topframe)
self.btoday.configure(style='Toolbutton', text='Today')
self.btoday.pack(expand='true', fill='x', side='left')
self._lyear = ttk.Label(self._topframe)
self._lyear.configure(text='2020')
self._lyear.pack(side='left')
self.bpyear = ttk.Button(self._topframe)
self.bpyear.configure(style='Toolbutton', text='L')
self.bpyear.pack(side='left')
self.bnyear = ttk.Button(self._topframe)
self.bnyear.configure(style='Toolbutton', text='R')
self.bnyear.pack(side='left')
self._topframe.pack(anchor='n', fill='x', side='top')
self._canvas = tk.Canvas(self)
self._canvas.configure(
background='#ffffff',
borderwidth='0',
height='160',
highlightthickness='0')
self._canvas.configure(width='240')
self._canvas.pack(
anchor='center',
expand='true',
fill='both',
side='top')
self.__img_prev = imgp = tk.PhotoImage(data=imgp_data)
self.__img_next = imgn = tk.PhotoImage(data=imgn_data)
def callback(event=None): return self._change_date('month', -1)
self.bpmonth.configure(image=imgp, command=callback)
def callback(event=None): return self._change_date('month', 1)
self.bnmonth.configure(image=imgn, command=callback)
def callback(event=None): return self._change_date('year', -1)
self.bpyear.configure(image=imgp, command=callback)
def callback(event=None): return self._change_date('year', 1)
self.bnyear.configure(image=imgn, command=callback)
self.btoday.configure(command=self._go_today)
self._canvas.bind('<Configure>', self._on_canvas_configure)
self._draw_calendar(self._canvas)
self._canvas.tag_bind('cell', '<Button-1>', self._on_cell_clicked)
def _reconfigure_calendar(self):
options = self.__options
self._date = self.datetime(options['year'], options['month'], 1)
self._cal = get_calendar(options['locale'], options['firstweekday'])
self._weeks = self._cal.monthdayscalendar(options['year'],
options['month'])
def _reconfigure_date(self):
options = self.__options
self._date = self.datetime(options['year'], options['month'], 1)
self._weeks = self._cal.monthdayscalendar(options['year'],
options['month'])
self._selection = None # Forget current selected day
self._redraw_calendar()
def _go_today(self, event=None):
options = self.__options
today = self.datetime.now()
options['year'] = today.year
options['month'] = today.month
self._reconfigure_date()
def _change_date(self, element, direction):
options = self.__options
newdate = None
if element == 'month':
if direction == -1:
newdate = self._date - self.timedelta(days=1)
else:
year, month = self._date.year, self._date.month
days = calendar.monthrange(year, month)[1] + 1
newdate = self._date + self.timedelta(days=days)
elif element == 'year':
year = self._date.year + direction
newdate = self.datetime(year, self._date.month, 1)
options['year'] = newdate.year
options['month'] = newdate.month
self._reconfigure_date()
def _on_cell_clicked(self, event=None):
item = self._canvas.find_withtag('current')
idx = self._recmat.index(item[0])
weeks = self._weeks
day = 0
f, c = i2rc(idx, 7)
if f < len(weeks):
day = weeks[f][c]
if day != 0:
self.select_day(day, self._date.month, self._date.year)
def _mark_days(self):
options = self.__options
year = self._date.year
month = self._date.month
weeks = self._weeks
now = self.datetime.now()
today = (now.year, now.month, now.day)
for i, f, c in rowmajor(6, 7):
day = 0
clear = True
if f < len(weeks):
day = weeks[f][c]
key = (year, month, day)
if ((None, None, day) in self._marked_days
or (None, month, day) in self._marked_days
or key in self._marked_days):
self._canvas.itemconfigure(self._recmat[i],
fill=options['markbg'],
outline=options['markbg'])
self._canvas.itemconfigure(self._txtmat[i],
fill=options['markfg'])
clear = False
if key == today:
self._canvas.itemconfigure(self._recmat[i],
fill=options['selectfg'],
outline=options['selectbg'])
self._canvas.itemconfigure(self._txtmat[i],
fill=options['selectbg'])
clear = False
if key == self._selection:
self._canvas.itemconfigure(self._recmat[i],
fill=options['selectbg'],
outline=options['selectbg'])
self._canvas.itemconfigure(self._txtmat[i],
fill=options['selectfg'])
clear = False
if clear:
# clear day
self._canvas.itemconfigure(self._recmat[i],
fill=options['calendarbg'],
outline=options['calendarbg'])
self._canvas.itemconfigure(self._txtmat[i],
fill=options['calendarfg'])
self.__markdays_cb = None
def _call_mark_days(self):
if self.__markdays_cb is None:
self.__markdays_cb = self.after_idle(self._mark_days)
def _remark_date(self, day, month=None, year=None, highlight=True):
key = (year, month, day)
if highlight:
self._marked_days.add(key)
else:
if key in self._marked_days:
self._marked_days.remove(key)
self._call_mark_days()
def mark_day(self, day, month=None, year=None):
"""Marks the specified month day with a visual marker
(typically by making the number bold).
If only day is specified and the calendar month and year
are changed, the marked day remain marked.
You can be more specific setting month and year parameters.
"""
self._remark_date(day, month, year, highlight=True)
def unmark_day(self, day, month=None, year=None):
self._remark_date(day, month, year, highlight=False)
def clear_marks(self):
"""Clears all marked days"""
self._marked_days.clear()
self._call_mark_days()
def _draw_calendar(self, canvas, redraw=False):
"""Draws calendar."""
options = self.__options
# Update labels:
name = self._cal.formatmonthname(self._date.year, self._date.month, 0,
withyear=False)
self._lmonth.configure(text=name.title())
self._lyear.configure(text=str(self._date.year))
# Update calendar
ch = canvas.winfo_height()
cw = canvas.winfo_width()
rowh = ch / 7.0
colw = cw / 7.0
# Header background
if self._rheader is None:
self._rheader = canvas.create_rectangle(0, 0, cw, rowh, width=0,
fill=options['headerbg'])
else:
canvas.itemconfigure(self._rheader, fill=options['headerbg'])
canvas.coords(self._rheader, 0, 0, cw, rowh)
# Header text
ox = 0
oy = rowh / 2.0
coffset = colw / 2.0
cols = self._cal.formatweekheader(3).split()
for i in range(0, 7):
x = ox + i * colw + coffset
if redraw:
item = self._theader[i]
canvas.coords(item, x, oy)
canvas.itemconfigure(item, text=cols[i],
fill=options['headerfg'])
else:
self._theader[i] = canvas.create_text(x, oy, text=cols[i],
                                      fill=options['headerfg'])
# background matrix
oy = rowh
ox = 0
for i, x, y, x1, y1 in matrix_coords(6, 7, rowh, colw, ox, oy):
x1 -= 1
y1 -= 1
if redraw:
rec = self._recmat[i]
canvas.coords(rec, x, y, x1, y1)
canvas.itemconfigure(rec, fill=options['calendarbg'])
else:
rec = canvas.create_rectangle(
x,
y,
x1,
y1,
width=1,
fill=options['calendarbg'],
outline=options['calendarbg'],
activeoutline=options['selectbg'],
activewidth=1,
tags='cell')
self._recmat[i] = rec
# text matrix
weeks = self._weeks
xoffset = colw / 2.0
yoffset = rowh / 2.0
oy = rowh
ox = 0
for i, x, y, x1, y1 in matrix_coords(6, 7, rowh, colw, ox, oy):
x += xoffset
y += yoffset
# day text
txt = ""
f, c = i2rc(i, 7)
if f < len(weeks):
day = weeks[f][c]
txt = "{0}".format(day) if day != 0 else ""
if redraw:
item = self._txtmat[i]
canvas.coords(item, x, y)
canvas.itemconfigure(item, text=txt)
else:
self._txtmat[i] = canvas.create_text(x, y, text=txt,
state=tk.DISABLED)
# Mark days
self._mark_days()
def _redraw_calendar(self):
self._draw_calendar(self._canvas, redraw=True)
# after idle callback trick
self.__redraw_cb = None
def _on_canvas_configure(self, event=None):
if self.__redraw_cb is None:
self.__redraw_cb = self.after_idle(self._redraw_calendar)
@property
def selection(self):
"""Return a datetime representing the current selected date."""
if not self._selection:
return None
year, month = self._date.year, self._date.month
return self.datetime(year, month, self._selection[2])
def select_day(self, day, month=None, year=None):
options = self.__options
options['month'] = month = self._date.month if month is None else month
options['year'] = year = self._date.year if year is None else year
self._reconfigure_date()
self._selection = (year, month, day)
self._call_mark_days()
self.event_generate('<<CalendarFrameDateSelected>>')
if __name__ == '__main__':
import random
locale.setlocale(locale.LC_ALL, locale.getdefaultlocale())
root = tk.Tk()
c = CalendarFrame(root)
c.grid()
# select day
c.select_day(1)
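    # Example additions to the demo (callback name is illustrative): mark some
    # days and react to the <<CalendarFrameDateSelected>> virtual event.
    c.mark_day(15)                # marked in every displayed month/year
    c.mark_day(24, month=12)      # marked only in December
    def on_date_selected(event):
        print('Selected date:', c.selection)
    c.bind('<<CalendarFrameDateSelected>>', on_date_selected)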
root.rowconfigure(0, weight=1)
root.columnconfigure(0, weight=1)
root.mainloop()
|
|
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
## BASE_DIR is derived from __file__, so this only works when the script is run
## as a file (e.g. from the command line). If you are running the code
## interactively, type in your paths manually.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirc.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing
## (furthest down in the file) contains the most recent follow-up data. This code
## checks whether the patient has already been loaded into the list and, if so,
## keeps the more recent data. This requires seeding the list with an empty entry.
## Data is: [[Patient ID, time (days), Vital status], [Patient ID, time (days), Vital status], ...]
clinical=[['','','']]
for i in data:
try:
if clinical[-1][0]==i[0]:
if i[8]=='Alive':
clinical[-1]=[i[0],int(i[9]),'Alive']
elif i[8]=='Dead':
clinical[-1]=[i[0],int(i[10]),'Dead']
else:
pass
else:
if i[8]=='Alive':
clinical.append([i[0],int(i[9]),'Alive'])
elif i[8]=='Dead':
clinical.append([i[0],int(i[10]),'Dead'])
else:
pass
except:
pass
## Removing the empty value.
clinical=clinical[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_patient_kirc.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[0]]=[grade_dict[i[4]],sex_dict[i[8]],int(i[-16])]
if i[24]=='Alive':
clinical4.append([i[0],int(i[25]),'Alive'])
elif i[24]=='Dead':
clinical4.append([i[0],int(i[26]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
## It is possible that the clinical data in the clinical_patient file is more up to
## date than the follow_up files. All the clinical data is merged, keeping whichever
## entry is the most up to date.
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
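## Example (illustrative barcode): for an aliquot barcode like 'TCGA-B0-5099-01A-...',
## the join above rebuilds the patient-level barcode 'TCGA-B0-5099' by re-joining
## the first three '-'-separated fields.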
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
if i[0] in TCGA_to_mrna:
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
## Average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
## Number of males
males=len([i for i in clinical_and_files if i[4]==0])
## Number of females
females=len([i for i in clinical_and_files if i[4]==1])
## To get the median survival we need to call survfit from R.
## Prepare variables for R.
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
## A dummy grouping variable is needed (a single group).
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
## A 0/1 event vector for deaths is also needed.
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
# The number of events (deaths) is the fourth column of the printed survfit output.
deaths=str(res).split('\n')[-2].strip().split()[3]
# The median survival time is the fifth column of the printed survfit output.
median=str(res).split('\n')[-2].strip().split()[4]
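## The column-position parsing above is fragile if the printed survfit layout changes.
## A minimal alternative sketch, assuming the rpy2 variables set above ('times', 'died',
## 'group') and that the survival package's summary table uses the names 'events' and
## 'median' (an assumption about the R output, not something taken from this script):
fit_table=ro.r('summary(survfit(Surv(times,died) ~ as.factor(group)))$table')
deaths_check=int(fit_table.rx2('events')[0])
median_check=float(fit_table.rx2('median')[0])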
## Write the summary data to a tab-delimited file.
f=open('patient_info.txt','w')
f.write('Average Age\tMales\tFemales\tDeaths\tMedian Survival\n')
f.write('\t'.join([str(age),str(males),str(females),deaths,median]))
f.close()
|
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import re
import sys
import build_version
from build_paths import SDK_SRC_DIR, SCRIPT_DIR, OUT_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
import getos
VALID_PLATFORMS = ['linux', 'mac', 'win']
PLATFORM_PREFIX_RE = re.compile(r'^\[([^\]]*)\](.*)$')
class ParseException(Exception):
def __init__(self, filename, line, message):
Exception.__init__(self)
self.filename = filename
self.line = line
self.message = message
def __str__(self):
return '%s:%d: %s' % (self.filename, self.line, self.message)
def SplitPattern(pattern):
match = PLATFORM_PREFIX_RE.match(pattern)
if not match:
return pattern, []
# platform-specific line
platforms = match.group(1).split(',')
# If this platform is included, strip the [...] part.
pattern = match.group(2)
return pattern, platforms
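# A small self-check of SplitPattern's contract (the rule strings below are made-up
# examples, not entries from an actual rule file): a leading "[platforms]" prefix is
# split off into a list, and a plain pattern comes back with an empty platform list.
assert SplitPattern('[win,mac]tools/example.py${EXE_EXT}') == \
    ('tools/example.py${EXE_EXT}', ['win', 'mac'])
assert SplitPattern('tools/example.py') == ('tools/example.py', [])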
class VerifyException(Exception):
pass
class Rules(object):
def __init__(self, filename, platform=None, contents=None):
self.glob_prefixes = []
self.exact_filenames = set()
self.filename = filename
self.platform = platform or getos.GetPlatform()
self.exe_ext = '.exe' if self.platform == 'win' else ''
if self.platform not in VALID_PLATFORMS:
raise ParseException(self.filename, 1,
'Unknown platform %s' % self.platform)
if not contents:
with open(filename) as f:
contents = f.read()
for line_no, rule in enumerate(contents.split('\n')):
rule = rule.strip()
if rule:
self.ParsePattern(line_no + 1, rule)
def ParsePattern(self, line_no, pattern):
pattern, platforms = SplitPattern(pattern)
if platforms:
unknown_platforms = set(platforms) - set(VALID_PLATFORMS)
if unknown_platforms:
msg = 'Unknown platform(s) %s.' % (
', '.join('"%s"' % platform for platform in unknown_platforms))
raise ParseException(self.filename, line_no, msg)
if self.platform not in platforms:
return
pattern = pattern.replace('${PLATFORM}', self.platform)
pattern = pattern.replace('${EXE_EXT}', self.exe_ext)
if '*' in pattern:
# glob pattern
# We only support * at the end.
if pattern.find('*') != len(pattern) - 1:
msg = '* is only allowed at the end of the line.'
raise ParseException(self.filename, line_no, msg)
# Remove the *
pattern = pattern[:-1]
self.glob_prefixes.append(pattern)
# Sort by longest prefix first; otherwise the rules:
#
# foo/*
# foo/bar/*
#
# Won't work properly. A file "foo/bar/baz" will match the first rule,
# not the second.
self.glob_prefixes.sort(cmp=lambda x, y: cmp(len(y), len(x)))
else:
self.exact_filenames.add(pattern)
def VerifyDirectoryList(self, directory_list):
exact_filenames_used = set()
glob_prefixes_used = set()
expected_globs = set()
expected_filenames = set()
unexpected_filenames = set()
for filename in directory_list:
if os.path.sep != '/':
filename = filename.replace(os.path.sep, '/')
if filename in self.exact_filenames:
exact_filenames_used.add(filename)
continue
# glob pattern
found_prefix = False
for prefix in self.glob_prefixes:
if filename.startswith(prefix):
glob_prefixes_used.add(prefix)
found_prefix = True
break
if not found_prefix:
unexpected_filenames.add(filename)
if len(exact_filenames_used) != len(self.exact_filenames):
# We looped through the directory list, so if the lengths are unequal, it
# must be that we expected something that isn't there.
expected_filenames = self.exact_filenames - exact_filenames_used
    if len(glob_prefixes_used) != len(self.glob_prefixes):
expected_globs = set(self.glob_prefixes) - glob_prefixes_used
if expected_filenames or unexpected_filenames or expected_globs:
msg = ''
if unexpected_filenames:
msg += '>>> Unexpected filenames: <<<\n%s\n' % (
'\n'.join(sorted(unexpected_filenames)))
if expected_filenames:
msg += '>>> Expected filenames: <<<\n%s\n' % (
'\n'.join(sorted(expected_filenames)))
if expected_globs:
msg += '>>> Expected 1+ files in these directories: <<< \n%s\n' % (
'\n'.join(sorted(expected_globs)))
raise VerifyException(msg)
def GetDirectoryList(directory_path):
result = []
for root, _, files in os.walk(directory_path):
rel_root = os.path.relpath(root, directory_path)
if rel_root == '.':
rel_root = ''
for base_name in files:
result.append(os.path.join(rel_root, base_name))
return result
def Verify(rule_path, directory_path, platform=None):
rules = Rules(rule_path, platform=platform)
directory_list = GetDirectoryList(directory_path)
rules.VerifyDirectoryList(directory_list)
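# A minimal, self-contained sketch of how Rules is meant to be used (the rule strings and
# filenames below are made up for illustration): rules can be passed via `contents`, so no
# rule file is needed, and VerifyDirectoryList raises VerifyException on a mismatch.
def _example_rules_check():
  rules = Rules('example.rules', platform='linux',
                contents='tools/*\n[win]tools/example.bat\nREADME\n')
  rules.VerifyDirectoryList(['tools/example.py', 'README'])  # passes silently
  try:
    rules.VerifyDirectoryList(['README', 'unexpected.txt'])
  except VerifyException:
    pass  # 'unexpected.txt' is not allowed and 'tools/*' matched nothing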
def SortFile(rule_path):
with open(rule_path) as infile:
lines = infile.readlines()
def compare(line1, line2):
line1 = SplitPattern(line1)[0].lower()
line2 = SplitPattern(line2)[0].lower()
return cmp(line1, line2)
lines.sort(compare)
with open(rule_path, 'w') as output:
for line in lines:
output.write(line)
def main(args):
parser = optparse.OptionParser(usage='%prog <rule file> <directory>')
parser.add_option('-p', '--platform',
help='Test with this platform, instead of the system\'s platform')
parser.add_option('-s', '--sort', action='store_true',
help='Sort the file list in place, rather than verifying the contents.')
options, args = parser.parse_args(args)
if not args:
args = [os.path.join(SCRIPT_DIR, 'sdk_files.list')]
if options.sort:
if not args:
parser.error('Expected rule file.')
SortFile(args[0])
return 0
if len(args) < 2:
version = build_version.ChromeMajorVersion()
args.append(os.path.join(OUT_DIR, 'pepper_%s' % version))
rule_path = args[0]
directory_path = args[1]
if options.platform:
if options.platform not in VALID_PLATFORMS:
parser.error('Unknown platform: %s' % options.platform)
platform = options.platform
else:
platform = getos.GetPlatform()
try:
return Verify(rule_path, directory_path, platform)
except ParseException, e:
print >> sys.stderr, 'Error parsing rules:\n', e
return 1
except VerifyException, e:
print >> sys.stderr, 'Error verifying file list:\n', e
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
docker-parse is a useful command to get
docker-run commands or docker-compose configurations from running containers
'''
from __future__ import absolute_import
from __future__ import print_function
import sys
import pipes
import getopt
import yaml
import docker
__version__ = '0.5.5'
def output_compose(info, image_info):
'''output as docker-compose format'''
container = info['Name'][1:]
conf = info['Config']
hconf = info['HostConfig']
compose = {}
compose['container_name'] = str(container)
compose['image'] = str(conf['Image'])
# Volumes
if 'Binds' in hconf and isinstance(hconf['Binds'], list):
options = []
for volume in hconf['Binds']:
options.append(str(volume))
if len(options) > 0:
compose['volumes'] = options
if 'PortBindings' in hconf and isinstance(hconf['PortBindings'], dict):
options = []
for binding, hosts in hconf['PortBindings'].items():
for host in hosts:
portbinding = ''
if 'HostIp' in host and host['HostIp']:
portbinding += host['HostIp'] + ':'
if 'HostPort' in host and host['HostPort']:
portbinding += host['HostPort'] + ':'
portbinding += binding
options.append(str(portbinding))
if len(options) > 0:
compose['ports'] = options
# Devices
if 'Devices' in hconf and isinstance(hconf['Devices'], list):
options = []
for device in hconf['Devices']:
options.append(str(device))
if len(options) > 0:
compose['devices'] = options
# RestartPolicy
if 'RestartPolicy' in hconf and hconf['RestartPolicy']['Name']:
policy = hconf['RestartPolicy']['Name']
if hconf['RestartPolicy']['MaximumRetryCount'] > 0:
policy += ':' + str(hconf['RestartPolicy']['MaximumRetryCount'])
compose['restart'] = str(policy)
# Privileged
if hconf['Privileged']:
compose['privileged'] = True
# Env
if isinstance(conf['Env'], list) and len(conf['Env']) > 0:
options = []
for env in conf['Env']:
if env not in image_info['Config']['Env']:
options.append(str(env))
if len(options) > 0:
compose['environment'] = options
# DNS
if 'Dns' in hconf and isinstance(hconf['Dns'], list):
options = []
for dns in hconf['Dns']:
options.append(str(dns))
if len(options) > 0:
compose['dns'] = options
# ExposedPorts
if 'ExposedPorts' in conf and isinstance(conf['ExposedPorts'], dict):
options = []
for port, _ in conf['ExposedPorts'].items():
if ('ExposedPorts' not in image_info['Config'] or
port not in image_info['Config']['ExposedPorts']):
options.append(str(port))
if len(options) > 0:
compose['expose'] = options
# User
if conf['User'] and image_info['Config']['User'] != conf['User']:
compose['user'] = str(conf['User'])
# WorkingDir
if image_info['Config']['WorkingDir'] != conf['WorkingDir']:
compose['working_dir'] = str(conf['WorkingDir'])
# EntryPoint
if conf['Entrypoint'] != image_info['Config']['Entrypoint']:
if isinstance(conf['Entrypoint'], list):
entry = []
for entry_item in conf['Entrypoint']:
entry.append(str(entry_item))
if len(entry) > 0:
compose['entrypoint'] = entry
elif isinstance(conf['Entrypoint'], str):
compose['entrypoint'] = str(conf['Entrypoint'])
name = str(info['Name'][1:])
print(yaml.dump({name:compose}, encoding='utf-8', default_flow_style=False))
def output_command(info, image_info, pretty=False):
'''output as docker-run command format'''
sep = pretty and ' \\\n ' or ' '
short_options = ''
options = []
container = info['Name'][1:]
conf = info['Config']
hconf = info['HostConfig']
options.append("--name={name}".format(name=container))
if not conf['AttachStdout']:
short_options += 'd'
if conf['OpenStdin']:
short_options += 'i'
if conf['Tty']:
short_options += 't'
if len(short_options) > 0:
options.append('-' + short_options)
options.append("-h {hostname}".format(hostname=conf['Hostname']))
# Volumes
if 'Binds' in hconf and isinstance(hconf['Binds'], list):
for volume in hconf['Binds']:
options.append("-v {volume}".format(volume=volume))
# PortBindings
if 'PortBindings' in hconf and isinstance(hconf['PortBindings'], dict):
for port, hosts in hconf['PortBindings'].items():
for host in hosts:
portbinding = ''
if 'HostIp' in host and host['HostIp']:
portbinding += host['HostIp'] + ':'
if 'HostPort' in host and host['HostPort']:
portbinding += host['HostPort'] + ':'
portbinding += port
options.append("-p {portbinding}".format(portbinding=portbinding))
# Devices
if 'Devices' in hconf and isinstance(hconf['Devices'], list):
for device in hconf['Devices']:
options.append("--device={device}".format(device=device))
# RestartPolicy
if 'RestartPolicy' in hconf and hconf['RestartPolicy']['Name']:
policy = hconf['RestartPolicy']['Name']
if hconf['RestartPolicy']['MaximumRetryCount'] > 0:
policy += ':' + str(hconf['RestartPolicy']['MaximumRetryCount'])
options.append("--restart={policy}".format(policy=policy))
# Privileged
if hconf['Privileged']:
options.append('--privileged')
# DNS
if 'Dns' in hconf and isinstance(hconf['Dns'], list):
for dns in hconf['Dns']:
            options.append("--dns={dns}".format(dns=dns))
# ExposedPorts
if 'ExposedPorts' in conf and isinstance(conf['ExposedPorts'], dict):
for port, _ in conf['ExposedPorts'].items():
if ('ExposedPorts' not in image_info['Config'] or
port not in image_info['Config']['ExposedPorts']):
options.append("--expose={port}".format(port=port))
# Env
if isinstance(conf['Env'], list):
for env in conf['Env']:
if env not in image_info['Config']['Env']:
options.append("-e {env}".format(env=pipes.quote(env)))
# EntryPoint
if conf['Entrypoint'] != image_info['Config']['Entrypoint']:
entry = []
if isinstance(conf['Entrypoint'], list):
for entry_item in conf['Entrypoint']:
entry.append(pipes.quote(entry_item))
elif isinstance(conf['Entrypoint'], str):
entry.append(pipes.quote(conf['Entrypoint']))
if len(entry) > 0:
options.append("--entrypoint={entry}".format(entry=pipes.quote(' '.join(entry))))
# WorkingDir
if image_info['Config']['WorkingDir'] != conf['WorkingDir']:
options.append("-w {dir}".format(dir=pipes.quote(conf['WorkingDir'])))
# User
if conf['User'] and image_info['Config']['User'] != conf['User']:
options.append("-u {user}".format(user=pipes.quote(conf['User'])))
# Cmd
cmd = []
if conf['Cmd'] != image_info['Config']['Cmd']:
if isinstance(conf['Cmd'], list):
for cmd_item in conf['Cmd']:
cmd.append(pipes.quote(cmd_item))
elif isinstance(conf['Cmd'], str):
cmd.append(pipes.quote(conf['Cmd']))
print('# docker-run command for {container}'.format(container=container))
cmd_str = 'docker run{sep}{options}{sep}{image}'.format(
options=sep.join(options), sep=sep, image=conf['Image'])
if len(cmd) > 0:
cmd_str += ' ' + ' '.join(cmd)
print(cmd_str)
print()
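# A minimal offline sketch of the input shape output_command expects (the dictionaries
# below are hand-built stand-ins for container.attrs / image.attrs, not real
# `docker inspect` output), handy for exercising the formatter without a Docker daemon.
def _example_output_command():
    image_info = {'Config': {'Env': ['PATH=/usr/bin'], 'Entrypoint': None,
                             'User': '', 'WorkingDir': '', 'Cmd': ['sh']}}
    info = {
        'Name': '/demo',
        'Config': {'Image': 'busybox', 'Hostname': 'demo',
                   'AttachStdout': False, 'OpenStdin': True, 'Tty': True,
                   'Env': ['PATH=/usr/bin', 'FOO=bar'], 'Entrypoint': None,
                   'User': '', 'WorkingDir': '', 'Cmd': ['sleep', '60']},
        'HostConfig': {'Privileged': False,
                       'Binds': ['/tmp:/data'],
                       'PortBindings': {'80/tcp': [{'HostIp': '', 'HostPort': '8080'}]},
                       'RestartPolicy': {'Name': 'always', 'MaximumRetryCount': 0}},
    }
    output_command(info, image_info, pretty=True)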
def main():
'''main entry'''
cli = docker.from_env()
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "pcv", ["pretty", "compose"])
except getopt.GetoptError as _:
print("Usage: docker-parse [--pretty|-p|--compose|-c] [containers]")
sys.exit(2)
if len(args) == 0:
containers = cli.containers.list(all=True)
else:
containers = map(lambda nm: cli.containers.get(nm), args)
as_compose = False
pretty = False
for opt, _ in opts:
if opt == '-v':
print(__version__)
sys.exit()
elif opt == '-p' or opt == '--pretty':
pretty = True
break
elif opt == '-c' or opt == '--compose':
as_compose = True
break
for container in containers:
info = container.attrs
# diff with image info to reduce information
image_info = cli.images.get(info['Config']['Image']).attrs
if as_compose:
output_compose(info, image_info)
else:
output_command(info, image_info, pretty)
if __name__ == "__main__":
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import json
import webob
from nova import exception
from nova import test
from nova.api.openstack import wsgi
class RequestTest(test.TestCase):
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertEqual(None, request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = "asdf<br />"
self.assertRaises(exception.InvalidContentType,
request.get_content_type)
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_accept_xml(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml"
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml, application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = \
"application/json; q=0.3, application/xml; q=0.9"
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
class ActionDispatcherTest(test.TestCase):
def test_dispatch(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
def test_dispatch_action_None(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
class ResponseHeadersSerializerTest(test.TestCase):
def test_default(self):
serializer = wsgi.ResponseHeadersSerializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'asdf')
self.assertEqual(response.status_int, 200)
def test_custom(self):
class Serializer(wsgi.ResponseHeadersSerializer):
def update(self, response, data):
response.status_int = 404
response.headers['X-Custom-Header'] = data['v']
serializer = Serializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'update')
self.assertEqual(response.status_int, 404)
self.assertEqual(response.headers['X-Custom-Header'], '123')
class DictSerializerTest(test.TestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual(serializer.serialize({}, 'update'), '')
class XMLDictSerializerTest(test.TestCase):
def test_xml(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
serializer = wsgi.XMLDictSerializer(xmlns="asdf")
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_xml)
class JSONDictSerializerTest(test.TestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_json)
class TextDeserializerTest(test.TestCase):
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
self.assertEqual(deserializer.deserialize({}, 'update'), {})
class JSONDeserializerTest(test.TestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1',
},
},
}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(deserializer.deserialize(data), as_dict)
class XMLDeserializerTest(test.TestCase):
def test_xml(self):
xml = """
<a a1="1" a2="2">
<bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
<d><e>1</e></d>
<f>1</f>
</a>
""".strip()
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1',
},
},
}
metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
deserializer = wsgi.XMLDeserializer(metadata=metadata)
self.assertEqual(deserializer.deserialize(xml), as_dict)
def test_xml_empty(self):
xml = """<a></a>"""
as_dict = {"body": {"a": {}}}
deserializer = wsgi.XMLDeserializer()
self.assertEqual(deserializer.deserialize(xml), as_dict)
class RequestHeadersDeserializerTest(test.TestCase):
def test_default(self):
deserializer = wsgi.RequestHeadersDeserializer()
req = wsgi.Request.blank('/')
self.assertEqual(deserializer.deserialize(req, 'asdf'), {})
def test_custom(self):
class Deserializer(wsgi.RequestHeadersDeserializer):
def update(self, request):
return {'a': request.headers['X-Custom-Header']}
deserializer = Deserializer()
req = wsgi.Request.blank('/')
req.headers['X-Custom-Header'] = 'b'
self.assertEqual(deserializer.deserialize(req, 'update'), {'a': 'b'})
class ResponseSerializerTest(test.TestCase):
def setUp(self):
class JSONSerializer(object):
def serialize(self, data, action='default'):
return 'pew_json'
class XMLSerializer(object):
def serialize(self, data, action='default'):
return 'pew_xml'
class HeadersSerializer(object):
def serialize(self, response, data, action):
response.status_int = 404
self.body_serializers = {
'application/json': JSONSerializer(),
            'application/xml': XMLSerializer(),
}
self.serializer = wsgi.ResponseSerializer(self.body_serializers,
HeadersSerializer())
def tearDown(self):
pass
def test_get_serializer(self):
ctype = 'application/json'
self.assertEqual(self.serializer.get_body_serializer(ctype),
self.body_serializers[ctype])
def test_get_serializer_unknown_content_type(self):
self.assertRaises(exception.InvalidContentType,
self.serializer.get_body_serializer,
'application/unknown')
def test_serialize_response(self):
response = self.serializer.serialize({}, 'application/json')
self.assertEqual(response.headers['Content-Type'], 'application/json')
self.assertEqual(response.body, 'pew_json')
self.assertEqual(response.status_int, 404)
def test_serialize_response_None(self):
response = self.serializer.serialize(None, 'application/json')
print response
self.assertEqual(response.headers['Content-Type'], 'application/json')
self.assertEqual(response.body, '')
self.assertEqual(response.status_int, 404)
def test_serialize_response_dict_to_unknown_content_type(self):
self.assertRaises(exception.InvalidContentType,
self.serializer.serialize,
{}, 'application/unknown')
class RequestDeserializerTest(test.TestCase):
def setUp(self):
class JSONDeserializer(object):
def deserialize(self, data, action='default'):
return 'pew_json'
class XMLDeserializer(object):
def deserialize(self, data, action='default'):
return 'pew_xml'
self.body_deserializers = {
'application/json': JSONDeserializer(),
            'application/xml': XMLDeserializer(),
}
self.deserializer = wsgi.RequestDeserializer(self.body_deserializers)
def tearDown(self):
pass
def test_get_deserializer(self):
expected = self.deserializer.get_body_deserializer('application/json')
self.assertEqual(expected, self.body_deserializers['application/json'])
def test_get_deserializer_unknown_content_type(self):
self.assertRaises(exception.InvalidContentType,
self.deserializer.get_body_deserializer,
'application/unknown')
def test_get_expected_content_type(self):
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
self.assertEqual(self.deserializer.get_expected_content_type(request),
'application/json')
def test_get_action_args(self):
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12,
}],
}
expected = {'action': 'update', 'id': 12}
self.assertEqual(self.deserializer.get_action_args(env), expected)
def test_deserialize(self):
def fake_get_routing_args(request):
return {'action': 'create'}
self.deserializer.get_action_args = fake_get_routing_args
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/xml'
deserialized = self.deserializer.deserialize(request)
expected = ('create', {}, 'application/xml')
self.assertEqual(expected, deserialized)
class ResourceTest(test.TestCase):
def test_dispatch(self):
class Controller(object):
def index(self, req, pants=None):
return pants
resource = wsgi.Resource(Controller())
actual = resource.dispatch(None, 'index', {'pants': 'off'})
expected = 'off'
self.assertEqual(actual, expected)
def test_dispatch_unknown_controller_action(self):
class Controller(object):
def index(self, req, pants=None):
return pants
resource = wsgi.Resource(Controller())
self.assertRaises(AttributeError, resource.dispatch,
None, 'create', {})
|
|
# -*- coding: utf-8 -*-
from pynes.nes_types import NesRs, NesArray, NesSprite, NesString, NesChrFile
from pynes.game import PPUSprite
class BitPak:
def __init__(self, game):
self.game = game
self.assigned = None
def __call__(self):
return None
def asm(self):
return ''
def procedure(self):
return None
def attribute(self):
return ''
def assigned_to(self, assigned):
self.assigned = assigned
class rs(BitPak):
def __init__(self, game):
BitPak.__init__(self, game)
def __call__(self, size):
return NesRs(size)
class get_sprite(BitPak):
def __init__(self, game):
BitPak.__init__(self, game)
def __call__(self, sprite):
return PPUSprite(sprite, self.game)
class wait_vblank(BitPak):
def __init__(self, game):
BitPak.__init__(self, game)
def __call__(self):
return None
def asm(self):
return ' JSR WAITVBLANK\n'
def procedure(self):
return ('WAITVBLANK:\n'
' BIT $2002\n'
' BPL WAITVBLANK\n'
' RTS\n')
class clearmem(BitPak):
def __init__(self, game):
BitPak.__init__(self, game)
def asm(self):
return ('CLEARMEM:\n'
' LDA #$00\n'
' STA $0000, x\n'
' STA $0100, x\n'
' STA $0200, x\n'
' STA $0400, x\n'
' STA $0500, x\n'
' STA $0600, x\n'
' STA $0700, x\n'
' LDA #$FE\n'
' STA $0300, x\n'
' INX\n'
' BNE CLEARMEM\n')
class import_chr(BitPak):
def __init__(self, game):
BitPak.__init__(self, game)
def __call__(self, string):
assert isinstance(string, NesString)
return NesChrFile(string)
class define_sprite(BitPak):
def __init__(self, game):
BitPak.__init__(self, game)
def __call__(self, x, y, tile, attrib=0x80):
assert isinstance(x, int)
assert isinstance(y, int)
assert isinstance(tile, int) or isinstance(tile, NesArray)
return NesSprite(x, y, tile, attrib)
class cls(BitPak):
def __init__(self, game):
BitPak.__init__(self, game)
def __call__(self):
self.line = self.game.get_param('line', 1)
def asm(self):
return ' JSR CLS\n'
def procedure(self):
        return ('CLS:\n'
                ' LDA $2002\n'   # reset the PPU address latch
                ' LDA #$20\n'    # point the PPU address at nametable 0 ($2000)
                ' STA $2006\n'
                ' LDA #$00\n'
                ' STA $2006\n'
                ' LDA #00\n'
                'LineLoop:\n'
                ' STA line\n'
                ' LDY #00\n'
                ' LDA #$25\n'  # blank == space
                'ColumnLoop:\n'
                ' STA $2007\n'
                ' INY\n'
                ' CPY #16\n'
                ' BNE ColumnLoop\n'
                ' LDA line\n'
                ' CLC\n'
                ' ADC #01\n'
                # ' STA line\n'
                ' CMP #16\n'
                ' BNE LineLoop\n'
                " LDA #00\n"
                " STA $2005\n"
                " STA $2005\n")
class show(BitPak):
def __init__(self, game):
BitPak.__init__(self, game)
self.game.ppu.nmi_enable = True
self.game.ppu.background_enable = True
self.game.ppu.background_pattern_table = 1
self.game.has_nmi = True
self.addressLow = game.get_param('addressLow', 1)
self.addressHigh = game.get_param('addressHigh', 1)
self.posLow = game.get_param('posLow', 1)
self.posHigh = game.get_param('posHigh', 1)
def __call__(self, string, y=None, x=None, nametable=0):
assert isinstance(string, NesString)
string.is_used = True
self.string = string
        base_address = 0x2000
        if y is None:
            y = 15
        if x is None:
            x = 16 - len(string) / 2
        pos = base_address + y * 32 + x
self.posHigh = (pos & 0xff00) >> 8
self.posLow = (pos & 0x00ff)
def asm(self):
asmcode = (" LDA #LOW(%s)\n"
" STA addressLow\n"
" LDA #HIGH(%s)\n"
" STA addressHigh\n"
" LDA #$%02X\n"
" STA posHigh\n"
" LDA #$%02X\n"
" STA posLow\n"
" JSR Show\n") % (self.string.instance_name,
self.string.instance_name,
self.posHigh,
self.posLow)
return asmcode
def procedure(self):
asmcode = ("Show:\n"
" LDA $2002\n"
" LDA posHigh\n"
" STA $2006\n"
" LDA posLow\n"
" STA $2006\n"
" LDY #$00\n"
"PrintLoop:\n"
" LDA (addressLow), y\n"
" CMP #$25\n"
" BEQ PrintEnd\n"
" STA $2007\n"
" INY\n"
" JMP PrintLoop\n"
"PrintEnd:\n"
" LDA #00\n"
" STA $2005\n"
" STA $2005\n"
" RTS\n")
return asmcode
class load_palette(BitPak):
def __init__(self, game):
BitPak.__init__(self, game)
def __call__(self, palette):
assert isinstance(palette, NesArray)
assert palette.instance_name is not None
self.palette = palette
return palette
def asm(self):
asmcode = (
'LoadPalettes:\n'
' LDA $2002 ; Reset PPU, start writing\n'
' LDA #$3F\n'
' STA $2006 ; High byte = $3F00\n'
' LDA #$00\n'
' STA $2006 ; Low byte = $3F00\n'
' LDX #$00\n'
'LoadPalettesIntoPPU:\n'
' LDA %s, x\n'
' STA $2007\n'
' INX\n') % self.palette.instance_name
asmcode += ' CPX #$%02x\n' % len(self.palette)
asmcode += ' BNE LoadPalettesIntoPPU\n'
return asmcode
class load_sprite(BitPak):
def __init__(self, game):
BitPak.__init__(self, game)
self.game.has_nmi = True # TODO remove this
self.game.ppu.sprite_enable = True
self.game.ppu.nmi_enable = True
def __call__(self, sprite, ppu_pos):
assert isinstance(sprite, NesSprite)
assert ppu_pos < 64
self.sprite = sprite
self.start_address = 0x0200 + (ppu_pos * 4)
self.sprite.ppu_address = ppu_pos
return None
def asm(self):
size = len(self.sprite)
load_sprites = self.game.get_label_for('LoadSprites')
load_sprites_into_PPU = self.game.get_label_for('LoadSpritesIntoPPU')
'''
Proposal
with asm(self.game) as a:
a.label('LoadSprites')
a.ldx = 0
a.lda = ('LoadSpritesIntoPPU', a.x)
a.sta = (self.start_address, a.x)
a.inx()
a.cpx(size * 4)
bne('LoadSpritesIntoPPU')
'''
asmcode = (
'%s:\n'
' LDX #$00\n'
'%s:\n'
' LDA %s, x\n'
' STA $%04X, x\n'
' INX\n'
' CPX #%d\n'
' BNE %s\n'
) % (load_sprites,
load_sprites_into_PPU,
self.sprite.instance_name,
self.start_address,
size * 4,
load_sprites_into_PPU)
return asmcode
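# A minimal sketch of the BitPak contract used throughout this module (this example class
# is illustrative only, not part of pynes): __call__ captures arguments when the script is
# translated, asm() emits the inline call site, and procedure() emits the shared routine.
class beep(BitPak):
    def __init__(self, game):
        BitPak.__init__(self, game)
    def __call__(self):
        return None
    def asm(self):
        return ' JSR Beep\n'
    def procedure(self):
        return ('Beep:\n'
                ' LDA #$01\n'  # enable the pulse 1 channel
                ' STA $4015\n'
                ' RTS\n')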
|
|
__author__ = 'Iurii Sergiichuk'
""" PRESENT block cipher implementation
USAGE EXAMPLE:
---------------
Importing:
-----------
>>> from pypresent import Present
Encrypting with a 80-bit key:
------------------------------
>>> key = "00000000000000000000".decode('hex')
>>> plain = "0000000000000000".decode('hex')
>>> cipher = Present(key)
>>> encrypted = cipher.encrypt(plain)
>>> encrypted.encode('hex')
'5579c1387b228445'
>>> decrypted = cipher.decrypt(encrypted)
>>> decrypted.encode('hex')
'0000000000000000'
Encrypting with a 128-bit key:
-------------------------------
>>> key = "0123456789abcdef0123456789abcdef".decode('hex')
>>> plain = "0123456789abcdef".decode('hex')
>>> cipher = Present(key)
>>> encrypted = cipher.encrypt(plain)
>>> encrypted.encode('hex')
'0e9d28685e671dd6'
>>> decrypted = cipher.decrypt(encrypted)
>>> decrypted.encode('hex')
'0123456789abcdef'
fully based on standard specifications: http://www.crypto.ruhr-uni-bochum.de/imperia/md/content/texte/publications/conferences/present_ches2007.pdf
test vectors: http://www.crypto.ruhr-uni-bochum.de/imperia/md/content/texte/publications/conferences/slides/present_testvectors.zip
"""
class Present:
def __init__(self, key, rounds=32):
"""Create a PRESENT cipher object
key: the key as a 128-bit or 80-bit rawstring
rounds: the number of rounds as an integer, 32 by default
"""
self.rounds = rounds
if len(key) * 8 == 80:
self.roundkeys = generateRoundkeys80(string2number(key), self.rounds)
elif len(key) * 8 == 128:
self.roundkeys = generateRoundkeys128(string2number(key), self.rounds)
else:
raise ValueError, "Key must be a 128-bit or 80-bit rawstring"
def encrypt(self, block):
"""Encrypt 1 block (8 bytes)
Input: plaintext block as raw string
Output: ciphertext block as raw string
"""
state = string2number(block)
for i in xrange(self.rounds - 1):
state = addRoundKey(state, self.roundkeys[i])
state = sBoxLayer(state)
state = pLayer(state)
cipher = addRoundKey(state, self.roundkeys[-1])
return number2string_N(cipher, 8)
def decrypt(self, block):
"""Decrypt 1 block (8 bytes)
Input: ciphertext block as raw string
Output: plaintext block as raw string
"""
state = string2number(block)
for i in xrange(self.rounds - 1):
state = addRoundKey(state, self.roundkeys[-i - 1])
state = pLayer_dec(state)
state = sBoxLayer_dec(state)
decipher = addRoundKey(state, self.roundkeys[0])
return number2string_N(decipher, 8)
def get_block_size(self):
return 8
# 0 1 2 3 4 5 6 7 8 9 a b c d e f
Sbox = [0xc, 0x5, 0x6, 0xb, 0x9, 0x0, 0xa, 0xd, 0x3, 0xe, 0xf, 0x8, 0x4, 0x7, 0x1, 0x2]
Sbox_inv = [Sbox.index(x) for x in xrange(16)]
PBox = [0, 16, 32, 48, 1, 17, 33, 49, 2, 18, 34, 50, 3, 19, 35, 51,
4, 20, 36, 52, 5, 21, 37, 53, 6, 22, 38, 54, 7, 23, 39, 55,
8, 24, 40, 56, 9, 25, 41, 57, 10, 26, 42, 58, 11, 27, 43, 59,
12, 28, 44, 60, 13, 29, 45, 61, 14, 30, 46, 62, 15, 31, 47, 63]
PBox_inv = [PBox.index(x) for x in xrange(64)]
def generateRoundkeys80(key, rounds):
"""Generate the roundkeys for a 80-bit key
Input:
key: the key as a 80-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in xrange(1, rounds + 1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
# rawKey[0:64]
roundkeys.append(key >> 16)
# 1. Shift
# rawKey[19:len(rawKey)]+rawKey[0:19]
key = ((key & (2 ** 19 - 1)) << 61) + (key >> 19)
# 2. SBox
# rawKey[76:80] = S(rawKey[76:80])
key = (Sbox[key >> 76] << 76) + (key & (2 ** 76 - 1))
        # 3. Salt
        # rawKey[15:20] ^ i
key ^= i << 15
return roundkeys
def generateRoundkeys128(key, rounds):
"""Generate the roundkeys for a 128-bit key
Input:
key: the key as a 128-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in xrange(1, rounds + 1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
roundkeys.append(key >> 64)
# 1. Shift
key = ((key & (2 ** 67 - 1)) << 61) + (key >> 67)
# 2. SBox
key = (Sbox[key >> 124] << 124) + (Sbox[(key >> 120) & 0xF] << 120) + (key & (2 ** 120 - 1))
# 3. Salt
# rawKey[62:67] ^ i
key ^= i << 62
return roundkeys
def addRoundKey(state, roundkey):
return state ^ roundkey
def sBoxLayer(state):
"""SBox function for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in xrange(16):
output += Sbox[( state >> (i * 4)) & 0xF] << (i * 4)
return output
def sBoxLayer_dec(state):
"""Inverse SBox function for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in xrange(16):
output += Sbox_inv[( state >> (i * 4)) & 0xF] << (i * 4)
return output
def pLayer(state):
"""Permutation layer for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in xrange(64):
output += ((state >> i) & 0x01) << PBox[i]
return output
def pLayer_dec(state):
"""Permutation layer for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in xrange(64):
output += ((state >> i) & 0x01) << PBox_inv[i]
return output
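# A quick sanity check (not part of the original module): the S-box and permutation
# layers must invert each other, so a round trip through each pair is the identity.
assert pLayer_dec(pLayer(0x0123456789abcdef)) == 0x0123456789abcdef
assert sBoxLayer_dec(sBoxLayer(0x0123456789abcdef)) == 0x0123456789abcdef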
def string2number(i):
""" Convert a string to a number
Input: string (big-endian)
Output: long or integer
"""
return int(i.encode('hex'), 16)
def number2string_N(i, N):
"""Convert a number to a string of fixed size
i: long or integer
N: length of string
Output: string (big-endian)
"""
s = '%0*x' % (N * 2, i)
return s.decode('hex')
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
key = "0123456789abcdef0123456789abcdef".decode('hex')
plain_1 = "1weqweqd"
plain_2 = "23444444"
plain_3 = "dddd2225"
print plain_1
print plain_2
print plain_3
cipher = Present(key)
encrypted_1 = cipher.encrypt(plain_1)
encrypted_2 = cipher.encrypt(plain_2)
encrypted_3 = cipher.encrypt(plain_3)
enc_1 = encrypted_1.encode('hex')
enc_2 = encrypted_2.encode('hex')
enc_3 = encrypted_3.encode('hex')
print enc_1
print enc_2
print enc_3
decrypted_1 = cipher.decrypt(encrypted_1)
decrypted_2 = cipher.decrypt(encrypted_2)
decrypted_3 = cipher.decrypt(encrypted_3)
decr_1 = decrypted_1.encode('hex')
decr_2 = decrypted_2.encode('hex')
decr_3 = decrypted_3.encode('hex')
print decr_1.decode('hex')
print decr_2.decode('hex')
print decr_3.decode('hex')
|
|
# Copyright (c) 2003-2016 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author:
# Andres Blanco
# Gustavo Moreira
#
# RFCs for the DNS Server service
#
# 1034 - Domain Names -- Concepts and Facilities [http://www.faqs.org/rfcs/rfc1034.html]
# 1035 - Domain Names -- Implementation and Specification [http://www.faqs.org/rfcs/rfc1035.html]
# 1123 - Requirements for Internet Hosts -- Application and Support [http://www.faqs.org/rfcs/rfc1123.html]
# 1886 - DNS Extensions to Support IP Version 6 [http://www.faqs.org/rfcs/rfc1886.html]
# 1995 - Incremental Zone Transfer in DNS [http://www.faqs.org/rfcs/rfc1995.html]
# 1996 - A Mechanism for Prompt Notification of Zone Changes (DNS NOTIFY) [http://www.faqs.org/rfcs/rfc1996.html]
# 2136 - Dynamic Updates in the Domain Name System (DNS UPDATE) [http://www.faqs.org/rfcs/rfc2136.html]
# 2181 - Clarifications to the DNS Specification [http://www.faqs.org/rfcs/rfc2181.html]
# 2308 - Negative Caching of DNS Queries (DNS NCACHE) [http://www.faqs.org/rfcs/rfc2308.html]
# 2535 - Domain Name System Security Extensions (DNSSEC) [http://www.faqs.org/rfcs/rfc2535.html]
# 2671 - Extension Mechanisms for DNS (EDNS0) [http://www.faqs.org/rfcs/rfc2671.html]
# 2782 - A DNS RR for specifying the location of services (DNS SRV) [http://www.faqs.org/rfcs/rfc2782.html]
# 2930 - Secret Key Establishment for DNS (TKEY RR) [http://www.faqs.org/rfcs/rfc2930.html]
# 3645 - Generic Security Service Algorithm for Secret Key Transaction Authentication for DNS (GSS-TSIG) [http://www.faqs.org/rfcs/rfc3645.html]
# 3646 - DNS Configuration options for Dynamic Host Configuration Protocol for IPv6 (DHCPv6) [http://www.faqs.org/rfcs/rfc3646.html]
#
import socket
import struct
from ImpactPacket import ProtocolPacket
class DNSFlags():
'Bitmap with the flags of a dns packet.'
# QR - Query/Response - 1 bit
QR_QUERY = int("0000000000000000", 2)
QR_RESPONSE = int("1000000000000000", 2)
# OP - Opcode - 4 bits
OP_STANDARD_QUERY = int("0000000000000000", 2) # Standard query.
OP_INVERSE_QUERY = int("0100000000000000", 2) # Inverse query.
OP_STATUS_QUERY = int("0010000000000000", 2) # Server status request.
OP_NOTIFY = int("0000100000000000", 2) # Notify.
OP_UPDATE = int("0100100000000000", 2) # Update.
# AA - Authority Answer - 1 bit
AA_NOT_AUTH_ANSWER = int("0000000000000000", 2) # Not authoritative.
AA_AUTH_ANSWER = int("0000010000000000", 2) # Is authoritative.
# TC - Truncated - 1 bit
TC_NOT_TRUNCATED = int("0000000000000000", 2) # Not truncated.
TC_TRUNCATED = int("0000001000000000", 2) # Message truncated.
# RD - Recursion Desired - 1 bit
RD_NOT_RECURSIVE_QUERY = int("0000000000000000", 2) # Recursion not desired.
RD_RECURSIVE_QUERY = int("0000000100000000", 2) # Recursion desired.
# RA - Recursion Available - 1 bit
RA_NOT_AVAILABLE = int("0000000000000000", 2) # Recursive query support not available.
RA_AVAILABLE = int("0000000010000000", 2) # Recursive query support available.
# Z - 3 bits
Z = int("0000000000000000", 2)
# AD - Authenticated Data - 1 bit
AUTHENTICATED_DATA = int("0000000000100000", 2)
# CD - Checking Disabled - 1 bit
CHECKING_DISABLED = int("0000000000010000", 2)
# RCODE - 4 bits
RCODE_NO_ERROR = int("0000000000000000", 2) # The request completed successfully.
RCODE_FORMAT_ERROR = int("0000000000001000", 2) # The name server was unable to interpret the query.
RCODE_SERVER_FAILURE = int("0000000000000100", 2) # The name server was unable to process this query due to a problem with the name server.
RCODE_NAME_ERROR = int("0000000000001100", 2) # Meaningful only for responses from an authoritative name server, this code signifies that the domain name referenced in the query does not exist.
RCODE_NOT_IMPLEMENTED = int("0000000000000010", 2) # Not Implemented. The name server does not support the requested kind of query.
RCODE_REFUSED = int("0000000000001010", 2) # The name server refuses to perform the specified operation for policy reasons.
RCODE_YXDOMAIN = int("0000000000000110", 2) # Name Exists when it should not.
RCODE_YXRRSET = int("0000000000001110", 2) # RR Set Exists when it should not.
RCODE_NXRRSET = int("0000000000000001", 2) # RR Set that should exist does not.
RCODE_NOAUTH = int("0000000000001001", 2) # Server Not Authoritative for zone.
RCODE_NOTZONE = int("0000000000000101", 2) # Name not contained in zone.
class DNSType():
A = 1 # IPv4 address.
NS = 2 # Authoritative name server.
MD = 3 # Mail destination. Obsolete use MX instead.
MF = 4 # Mail forwarder. Obsolete use MX instead.
CNAME = 5 # Canonical name for an alias.
SOA = 6 # Marks the start of a zone of authority.
MB = 7 # Mailbox domain name.
MG = 8 # Mail group member.
MR = 9 # Mail rename domain name.
NULL = 10 # Null resource record.
WKS = 11 # Well known service description.
PTR = 12 # Domain name pointer.
HINFO = 13 # Host information.
MINFO = 14 # Mailbox or mail list information.
MX = 15 # Mail exchange.
TXT = 16 # Text strings.
RP = 17 # Responsible Person.
AFSDB = 18 # AFS Data Base location.
X25 = 19 # X.25 PSDN address.
ISDN = 20 # ISDN address.
RT = 21 # Route Through.
NSAP = 22 # NSAP address. NSAP style A record.
NSAP_PTR = 23 # NSAP pointer.
SIG = 24 # Security signature.
KEY = 25 # Security key.
PX = 26 # X.400 mail mapping information.
GPOS = 27 # Geographical Position.
AAAA = 28 # IPv6 Address.
LOC = 29 # Location Information.
NXT = 30 # Next Domain (obsolete).
EID = 31 # Endpoint Identifier.
NB = 32 # NetBIOS general Name Service.
NBSTAT = 33 # NetBIOS NODE STATUS.
ATMA = 34 # ATM Address.
NAPTR = 35 # Naming Authority Pointer.
KX = 36 # Key Exchanger.
CERT = 37
A6 = 38
DNAME = 39
SINK = 40
OPT = 41
APL = 42
DS = 43 # Delegation Signer.
SSHFP = 44 # SSH Key Fingerprint.
IPSECKEY = 45
RRSIG = 46
NSEC = 47 # NextSECure.
DNSKEY = 48
DHCID = 49 # DHCP identifier.
NSEC3 = 50
NSEC3PARAM = 51
HIP = 55 # Host Identity Protocol.
NINFO = 56
RKEY = 57
SPF = 99 # Sender Policy Framework.
UINFO = 100
UID = 101
GID = 102
UNSPEC = 103
TKEY = 249
TSIG = 250 # Transaction Signature.
IXFR = 251 # Incremental transfer.
AXFR = 252 # A request for a transfer of an entire zone.
MAILB = 253 # A request for mailbox-related records (MB, MG or MR).
MAILA = 254 # A request for mail agent RRs. Obsolete.
ALL = 255 # A request for all records.
    TA = 32768 # DNSSEC Trust Authorities.
    DLV = 32769 # DNSSEC Lookaside Validation.
@staticmethod
def getTypeName(type):
for item, value in DNSType.__dict__.items():
if value == type:
return item
class DNSClass():
RESERVED = 0
IN = 1 # Internet.
CH = 3 # Chaos.
HS = 4 # Hesiod.
NONE = 254
ANY = 255 # QCLASS only
@staticmethod
def getClassName(type):
for item, value in DNSClass.__dict__.items():
if value == type:
return item
class DNS(ProtocolPacket):
'''The Message Header is present in all messages. Never empty.
Contains various flags and values which control the transaction.'''
__TYPE_LEN = 2 # Unsigned 16 bit value.
__CLASS_LEN = 2 # Unsigned 16 bit value.
__POINTER_LEN = 2 # A pointer is an unsigned 16-bit value.
__TTL_LEN = 4 # Unsigned 32 bit value. The time in seconds that the record may be cached.
__RDLENGTH_LEN = 2 # Unsigned 16-bit value that defines the length in bytes (octets) of the RDATA record.
__TYPE_A_LEN = 4 # Unsigned 32-bit value representing the IP address.
__SERIAL_LEN = 4 # Serial Number Unsigned 32-bit integer.
__REFRESH_LEN = 4 # Refresh interval Unsigned 32-bit integer.
__RETRY_LEN = 4 # Retry Interval Unsigned 32-bit integer.
__EXPIRATION_LEN = 4 # Expiration Limit Unsigned 32-bit integer.
__MINTTL_LEN = 4 # Minimum TTL Unsigned 32-bit integer.
__PREF_LEN = 2 # Preference Unsigned 16-bit integer.
__IS_POINTER = int("11000000", 2)
__OFFSETMASK = int("00111111", 2)
def __init__(self, aBuffer = None):
self.__HEADER_BASE_SIZE = 12
self.__TAIL_SIZE = 0
ProtocolPacket.__init__(self, self.__HEADER_BASE_SIZE, self.__TAIL_SIZE)
if aBuffer:
self.load_packet(aBuffer)
def get_transaction_id(self):
'Get 16 bit message ID.'
return self.header.get_word(0)
def set_transaction_id(self, value):
'Set 16 bit message ID.'
self.header.set_word(0, value)
def get_transaction_id_tcp(self):
'Get 16 bit message ID.'
return self.header.get_word(2)
def set_transaction_id_tcp(self, value):
'Set 16 bit message ID.'
self.header.set_word(2, value)
def get_flags(self):
'Get 16 bit flags.'
return self.header.get_word(2)
def set_flags(self, value):
'Set 16 bit flags.'
self.header.set_word(2, value)
def get_flags_tcp(self):
'Get 16 bit flags.'
return self.header.get_word(4)
def set_flags_tcp(self, value):
'Set 16 bit flags.'
self.header.set_word(4, value)
def get_qdcount(self):
'Get Unsigned 16 bit integer specifying the number of entries in the question section.'
return self.header.get_word(4)
def set_qdcount(self, value):
'Set Unsigned 16 bit integer specifying the number of entries in the question section.'
self.header.set_word(4, value)
def get_qdcount_tcp(self):
'Get Unsigned 16 bit integer specifying the number of entries in the question section.'
return self.header.get_word(6)
def set_qdcount_tcp(self, value):
'Set Unsigned 16 bit integer specifying the number of entries in the question section.'
self.header.set_word(6, value)
def get_ancount(self):
'Get Unsigned 16 bit integer specifying the number of resource records in the answer section'
return self.header.get_word(6)
def set_ancount(self, value):
'Set Unsigned 16 bit integer specifying the number of resource records in the answer section'
self.header.set_word(6, value)
def get_nscount(self):
'Get Unsigned 16 bit integer specifying the number of name server resource records in the authority section.'
return self.header.get_word(8)
def set_nscount(self, value):
'Set Unsigned 16 bit integer specifying the number of name server resource records in the authority section.'
self.header.set_word(8, value)
def get_arcount(self):
'Get Unsigned 16 bit integer specifying the number of resource records in the additional records section.'
return self.header.get_word(10)
def set_arcount(self, value):
'Set Unsigned 16 bit integer specifying the number of resource records in the additional records section.'
self.header.set_word(10, value)
def get_questions(self):
'Get a list of the DNS Question.'
return self.__get_questions()[0]
def __get_questions(self):
aux = []
offset = 0
qdcount = self.get_qdcount()
data = self.get_body_as_string()
for _ in range(qdcount): # number of questions
offset, qname = self.parseCompressedMessage(data, offset)
qtype = data[offset:offset+self.__TYPE_LEN]
offset += self.__TYPE_LEN
qclass = data[offset:offset+self.__CLASS_LEN]
offset += self.__CLASS_LEN
qtype = struct.unpack("!H", qtype)[0]
qclass = struct.unpack("!H", qclass)[0]
aux.append((qname, qtype, qclass))
return (aux, offset)
def get_questions_tcp(self):
'Get a list of the DNS Question.'
return self.__get_questions_tcp()[0]
def __get_questions_tcp(self):
aux = []
offset = 2
qdcount = self.get_qdcount_tcp()
data = self.get_body_as_string()
for _ in range(qdcount): # number of questions
offset, qname = self.parseCompressedMessage(data, offset)
qtype = data[offset:offset+self.__TYPE_LEN]
offset += self.__TYPE_LEN
qclass = data[offset:offset+self.__CLASS_LEN]
offset += self.__CLASS_LEN
qtype = struct.unpack("!H", qtype)[0]
qclass = struct.unpack("!H", qclass)[0]
aux.append((qname, qtype, qclass))
return (aux, offset)
def parseCompressedMessage(self, buf, offset=0):
'Parse compressed message defined on rfc1035 4.1.4.'
if offset >= len(buf):
raise Exception("No more data to parse. Offset is bigger than length of buffer.")
byte = struct.unpack("B", buf[offset])[0]
# if the first two bits are ones (11000000=0xC0), the next bits are the offset
if byte & 0xC0 == 0xC0:
# It's a pointer
pointer = struct.unpack("!H", buf[offset:offset+2])[0] # network unsigned short
pointer = (pointer & 0x3FFF) - self.__HEADER_BASE_SIZE
offset += 2
name = self.parseCompressedMessage(buf, pointer)[1]
return (offset, name)
else:
# It's a label
if byte == 0x00:
offset += 1
return (offset, '')
offset += 1
name = buf[offset:offset+byte]
offset += byte
offset, unamed = self.parseCompressedMessage(buf, offset)
if not unamed:
return (offset, name)
else:
return (offset, name + "." + unamed)
def get_answers(self):
return self.__get_answers()[0]
def get_authoritative(self):
return self.__get_authoritative()[0]
def get_additionals(self):
return self.__get_additionals()[0]
def __get_answers(self):
offset = self.__get_questions()[1] # get the initial offset
ancount = self.get_ancount()
return self.__process_answer_structure(offset, ancount)
def __get_authoritative(self):
'Get a list of the DNS Authoritative.'
offset = self.__get_answers()[1] # get the initial offset
nscount = self.get_nscount()
return self.__process_answer_structure(offset, nscount)
def __get_additionals(self):
'Get a list of the DNS Additional Records.'
offset = self.__get_authoritative()[1] # get the initial offset
arcount = self.get_arcount()
return self.__process_answer_structure(offset, arcount)
def __process_answer_structure(self, offset, num):
aux = []
data = self.get_body_as_string()
for _ in range(num):
offset, qname = self.parseCompressedMessage(data, offset)
qtype = data[offset:offset+self.__TYPE_LEN]
qtype = struct.unpack("!H", qtype)[0]
offset += self.__TYPE_LEN
qclass = data[offset:offset+self.__CLASS_LEN]
qclass = struct.unpack("!H", qclass)[0]
offset += self.__CLASS_LEN
qttl_raw = data[offset:offset+self.__TTL_LEN]
qttl = struct.unpack("!L", qttl_raw)[0]
offset += self.__TTL_LEN
qrdlength = data[offset:offset+self.__RDLENGTH_LEN]
qrdlength = struct.unpack("!H", qrdlength)[0]
offset += self.__RDLENGTH_LEN
qrdata = {}
if qtype == DNSType.A:
# IP Address Unsigned 32-bit value representing the IP address
qrdata["IPAddress"] = socket.inet_ntoa(data[offset:offset+qrdlength])
offset += self.__TYPE_A_LEN
elif qtype == DNSType.SOA:
# Primary NS Variable length. The name of the Primary Master for the domain. May be a label, pointer or any combination.
offset, primaryNs = self.parseCompressedMessage(data, offset)
qrdata["PrimaryNS"] = primaryNs
# Admin MB Variable length. The administrator's mailbox. May be a label, pointer or any combination.
offset, adminMb = self.parseCompressedMessage(data, offset)
qrdata["AdminMB"] = adminMb
# Serial Number Unsigned 32-bit integer.
qrdata["SerialNumber"] = struct.unpack("!L", data[offset:offset+self.__SERIAL_LEN])[0]
offset += self.__SERIAL_LEN
# Refresh interval Unsigned 32-bit integer.
qrdata["RefreshInterval"] = struct.unpack("!L", data[offset:offset+self.__REFRESH_LEN])[0]
offset += self.__REFRESH_LEN
# Retry Interval Unsigned 32-bit integer.
qrdata["RetryInterval"] = struct.unpack("!L", data[offset:offset+self.__RETRY_LEN])[0]
offset += self.__RETRY_LEN
# Expiration Limit Unsigned 32-bit integer.
qrdata["ExpirationLimit"] = struct.unpack("!L", data[offset:offset+self.__EXPIRATION_LEN])[0]
offset += self.__EXPIRATION_LEN
# Minimum TTL Unsigned 32-bit integer.
qrdata["MinimumTTL"] = struct.unpack("!L", data[offset:offset+self.__MINTTL_LEN])[0]
offset += self.__MINTTL_LEN
elif qtype == DNSType.MX:
                # Preference Unsigned 16-bit integer.
                qrdata["Preference"] = struct.unpack("!H", data[offset:offset+self.__PREF_LEN])[0]
                offset += self.__PREF_LEN
                # Mail Exchanger The host name that provides the service. May be a label, pointer or any combination.
                offset, mailExch = self.parseCompressedMessage(data, offset)
qrdata["MailExchanger"] = mailExch
elif qtype == DNSType.PTR or qtype == DNSType.NS or qtype == DNSType.CNAME:
# Name The host name that represents the supplied IP address (in the case of a PTR) or the NS name for the supplied domain (in the case of NS). May be a label, pointer or any combination.
offset, name = self.parseCompressedMessage(data, offset)
qrdata["Name"] = name
elif qtype == DNSType.OPT:
# rfc2671 4.3
#NAME domain name empty (root domain)
#TYPE u_int16_t OPT
#CLASS u_int16_t sender's UDP payload size
#TTL u_int32_t extended RCODE and flags
#RDLEN u_int16_t describes RDATA
#RDATA octet stream {attribute,value} pairs
#udp_payload = qclass
udp_payload_size = qclass
ext_rcode = struct.unpack("B", qttl_raw[0])[0]
version = struct.unpack("B", qttl_raw[1])[0]
flags = struct.unpack("!H", qttl_raw[2:4])[0]
qrdata["RDATA"] = data[offset:offset+qrdlength]
offset += qrdlength
aux.append((qname, qtype, udp_payload_size, ext_rcode, version, flags, qrdata))
continue
else:
# We don't know how to parse it, just skip it
offset += qrdlength
aux.append((qname, qtype, qclass, qttl, qrdata))
return (aux, offset)
def get_header_size(self):
return self.__HEADER_BASE_SIZE
def __str__(self):
res = ""
id = self.get_transaction_id()
flags = self.get_flags()
qdcount = self.get_qdcount()
ancount = self.get_ancount()
nscount = self.get_nscount()
arcount = self.get_arcount()
res += "DNS "
if flags & DNSFlags.QR_RESPONSE:
res += "RESPONSE\n"
else:
res += "QUERY\n"
res += " - Transaction ID -- [0x%04x] %d\n" % (id, id)
res += " - Flags ----------- [0x%04x] %d\n" % (flags, flags)
res += " - QdCount --------- [0x%04x] %d\n" % (qdcount, qdcount)
res += " - AnCount --------- [0x%04x] %d\n" % (ancount, ancount)
res += " - NsCount --------- [0x%04x] %d\n" % (nscount, nscount)
res += " - ArCount --------- [0x%04x] %d\n" % (arcount, arcount)
if qdcount > 0:
res += " - Questions:\n"
questions = self.get_questions()
questions.reverse()
while(questions):
qname, qtype, qclass = questions.pop()
format = (qname, DNSType.getTypeName(qtype), qtype, DNSClass.getClassName(qclass), qclass)
res += " * Domain: %s - Type: %s [0x%04x] - Class: %s [0x%04x]\n" % format
if ancount > 0:
res += " - Answers:\n"
answers = self.get_answers()
answers.reverse()
while(answers):
qname, qtype, qclass, qttl, qrdata = answers.pop()
format = (qname, DNSType.getTypeName(qtype), qtype, DNSClass.getClassName(qclass), qclass, qttl, repr(qrdata))
res += " * Domain: %s - Type: %s [0x%04x] - Class: %s [0x%04x] - TTL: %d seconds - %s\n" % format
if nscount > 0:
res += " - Authoritative:\n"
authoritative = self.get_authoritative()
authoritative.reverse()
while(authoritative):
qname, qtype, qclass, qttl, qrdata = authoritative.pop()
format = (qname, DNSType.getTypeName(qtype), qtype, DNSClass.getClassName(qclass), qclass, qttl, repr(qrdata))
res += " * Domain: %s - Type: %s [0x%04x] - Class: %s [0x%04x] - TTL: %d seconds - %s\n" % format
if arcount > 0:
res += " - Additionals:\n"
additionals = self.get_additionals()
for additional in additionals:
qtype = additional[1]
if qtype == DNSType.OPT:
qname, qtype, udp_payload_size, ext_rcode, version, flags, qrdata = additional
format = (DNSType.getTypeName(qtype), qtype, udp_payload_size, ext_rcode, version, flags, repr(qrdata['RDATA']))
res += " * Name: <Root> - Type: %s [0x%04x] - udp payload size: [%d] - extended RCODE: [0x%02x] - EDNS0 version: [0x%02x] - Z Flags: [0x%02x] - RDATA: [%s]\n" % format
else:
qname, qtype, qclass, qttl, qrdata = additional
format = (qname, DNSType.getTypeName(qtype), qtype, DNSClass.getClassName(qclass), qclass, qttl, repr(qrdata))
res += " * Domain: %s - Type: %s [0x%04x] - Class: %s [0x%04x] - TTL: %d seconds - %s\n" % format
return res
def __get_questions_raw(self):
if self.get_qdcount() == 0:
return ''
questions_offset = self.__get_questions()[1]
raw_data = self.get_body_as_string()[:questions_offset]
return raw_data
def __get_answers_raw(self):
if self.get_ancount() == 0:
return ''
questions_offset = self.__get_questions()[1]
answers_offset = self.__get_answers()[1]
raw_data = self.get_body_as_string()[questions_offset: answers_offset]
return raw_data
def __get_authoritative_raw(self):
if self.get_nscount() == 0:
return ''
answers_offset = self.__get_answers()[1]
authoritative_offset = self.__get_authoritative()[1]
raw_data = self.get_body_as_string()[answers_offset:authoritative_offset]
return raw_data
def __get_additionals_raw(self):
if self.get_arcount() == 0:
return ''
authoritative_offset = self.__get_authoritative()[1]
raw_data = self.get_body_as_string()[authoritative_offset:]
return raw_data
def add_answer(self, answer_raw):
'''Add a raw answer'''
questions_raw = self.__get_questions_raw()
answers_raw = self.__get_answers_raw()
authoritative_raw = self.__get_authoritative_raw()
additionals_raw = self.__get_additionals_raw()
answers_raw += answer_raw
body = questions_raw + answers_raw + authoritative_raw + additionals_raw
self.load_body(body) # It breaks children hierarchy
# Increment the answer count
cur_answer_count = self.get_ancount()+1
self.set_ancount(cur_answer_count)
def is_edns0(self):
additionals = self.get_additionals()
for item in additionals:
response_type = item[1]
if response_type == DNSType.OPT:
return True
return False
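# Illustrative helper (an addition, not part of the original module): summarize a
# parsed packet using only the accessors exercised above. The name and the
# returned dict layout are assumptions made for demonstration purposes.
def _summarize_dns_packet(raw_packet):
    d = DNS(raw_packet)
    return {
        'questions': d.get_qdcount(),
        'answers': d.get_ancount(),
        'authoritative': d.get_nscount(),
        'additionals': d.get_arcount(),
        'edns0': d.is_edns0(),
    }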
if __name__ == "__main__":
pkts = [
"\x6a\x8c\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x03\x77\x77\x77" \
"\x05\x74\x61\x72\x74\x61\x03\x63\x6f\x6d\x00\x00\x01\x00\x01",
"\x6a\x8c\x81\x80\x00\x01\x00\x02\x00\x02\x00\x00\x03\x77\x77\x77" \
"\x05\x74\x61\x72\x74\x61\x03\x63\x6f\x6d\x00\x00\x01\x00\x01\xc0" \
"\x0c\x00\x05\x00\x01\x00\x00\x07\x08\x00\x02\xc0\x10\xc0\x10\x00" \
"\x01\x00\x01\x00\x00\x07\x08\x00\x04\x45\x59\x1f\xc7\xc0\x10\x00" \
"\x02\x00\x01\x00\x02\xa3\x00\x00\x0f\x03\x6e\x73\x31\x08\x62\x6c" \
"\x75\x65\x68\x6f\x73\x74\xc0\x16\xc0\x10\x00\x02\x00\x01\x00\x02" \
"\xa3\x00\x00\x06\x03\x6e\x73\x32\xc0\x4d",
"\x82\x75\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x03\x77\x77\x77" \
"\x04\x6a\x68\x6f\x6e\x03\x63\x6f\x6d\x00\x00\x01\x00\x01",
"\x82\x75\x81\x80\x00\x01\x00\x01\x00\x02\x00\x02\x03\x77\x77\x77" \
"\x04\x6a\x68\x6f\x6e\x03\x63\x6f\x6d\x00\x00\x01\x00\x01\xc0\x0c" \
"\x00\x01\x00\x01\x00\x00\x00\x05\x00\x04\xd1\x3b\xc3\x14\xc0\x10" \
"\x00\x02\x00\x01\x00\x00\x06\xf8\x00\x0f\x03\x6e\x73\x31\x08\x74" \
"\x72\x61\x66\x66\x69\x63\x7a\xc0\x15\xc0\x10\x00\x02\x00\x01\x00" \
"\x00\x06\xf8\x00\x06\x03\x6e\x73\x32\xc0\x3e\xc0\x3a\x00\x01\x00" \
"\x01\x00\x00\x00\x0d\x00\x04\xd1\x3b\xc2\xf6\xc0\x55\x00\x01\x00" \
"\x01\x00\x00\x00\x85\x00\x04\xd1\x3b\xc3\xf6",
"\xef\x55\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x04\x6d\x61\x69" \
"\x6c\x06\x67\x6f\x6f\x67\x6c\x65\x03\x63\x6f\x6d\x00\x00\x01\x00" \
"\x01",
"\xef\x55\x81\x80\x00\x01\x00\x04\x00\x04\x00\x04\x04\x6d\x61\x69" \
"\x6c\x06\x67\x6f\x6f\x67\x6c\x65\x03\x63\x6f\x6d\x00\x00\x01\x00" \
"\x01\xc0\x0c\x00\x05\x00\x01\x00\x00\x06\x79\x00\x0f\x0a\x67\x6f" \
"\x6f\x67\x6c\x65\x6d\x61\x69\x6c\x01\x6c\xc0\x11\xc0\x2d\x00\x01" \
"\x00\x01\x00\x00\x00\x77\x00\x04\xd1\x55\xc3\x53\xc0\x2d\x00\x01" \
"\x00\x01\x00\x00\x00\x77\x00\x04\xd1\x55\xc3\x12\xc0\x2d\x00\x01" \
"\x00\x01\x00\x00\x00\x77\x00\x04\xd1\x55\xc3\x13\xc0\x11\x00\x02" \
"\x00\x01\x00\x00\x00\x5d\x00\x06\x03\x6e\x73\x33\xc0\x11\xc0\x11" \
"\x00\x02\x00\x01\x00\x00\x00\x5d\x00\x06\x03\x6e\x73\x34\xc0\x11" \
"\xc0\x11\x00\x02\x00\x01\x00\x00\x00\x5d\x00\x06\x03\x6e\x73\x31" \
"\xc0\x11\xc0\x11\x00\x02\x00\x01\x00\x00\x00\x5d\x00\x06\x03\x6e" \
"\x73\x32\xc0\x11\xc0\x9c\x00\x01\x00\x01\x00\x00\x04\x4e\x00\x04" \
"\xd8\xef\x20\x0a\xc0\xae\x00\x01\x00\x01\x00\x00\x06\x64\x00\x04" \
"\xd8\xef\x22\x0a\xc0\x78\x00\x01\x00\x01\x00\x00\x00\x05\x00\x04" \
"\xd8\xef\x24\x0a\xc0\x8a\x00\x01\x00\x01\x00\x00\x00\x08\x00\x04" \
"\xd8\xef\x26\x0a"
]
for pkt in pkts:
d = DNS(pkt)
print d
|
|
import random, math
import pandas as pd
import numpy as np
import scipy.io
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.manifold import Isomap
from sklearn.neighbors import KNeighborsClassifier
# If you'd like to try this lab with PCA instead of Isomap,
# as the dimensionality reduction technique:
Test_PCA = False
matplotlib.style.use('ggplot') # Look Pretty
def Plot2DBoundary(DTrain, LTrain, DTest, LTest):
# The dots are training samples (img not drawn), and the pics are testing samples (images drawn)
    # Play around with the K values. This is a very controlled dataset, so you should be able to get perfect classification on the testing entries.
# Play with the K for isomap, play with the K for neighbors.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('Transformed Boundary, Image Space -> 2D')
padding = 0.1 # Zoom out
resolution = 1 # Don't get too detailed; smaller values (finer rez) will take longer to compute
colors = ['blue','green','orange','red']
# ------
# Calculate the boundaries of the mesh grid. The mesh grid is
# a standard grid (think graph paper), where each point will be
# sent to the classifier (KNeighbors) to predict what class it
# belongs to. This is why KNeighbors has to be trained against
    # 2D data, so we can produce this contour. Once we have the
# label for each point on the grid, we can color it appropriately
# and plot it.
x_min, x_max = DTrain[:, 0].min(), DTrain[:, 0].max()
y_min, y_max = DTrain[:, 1].min(), DTrain[:, 1].max()
x_range = x_max - x_min
y_range = y_max - y_min
x_min -= x_range * padding
y_min -= y_range * padding
x_max += x_range * padding
y_max += y_range * padding
# Using the boundaries, actually make the 2D Grid Matrix:
xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
np.arange(y_min, y_max, resolution))
# What class does the classifier say about each spot on the chart?
# The values stored in the matrix are the predictions of the model
# at said location:
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the mesh grid as a filled contour plot:
    plt.contourf(xx, yy, Z, cmap=plt.cm.terrain, zorder=-100)
# ------
# When plotting the testing images, used to validate if the algorithm
# is functioning correctly, size them as 5% of the overall chart size
x_size = x_range * 0.05
y_size = y_range * 0.05
# First, plot the images in your TEST dataset
img_num = 0
for index in LTest.index:
        # DTest is a regular NDArray, so iterate over it one sample at a time.
x0, y0 = DTest[img_num,0]-x_size/2., DTest[img_num,1]-y_size/2.
x1, y1 = DTest[img_num,0]+x_size/2., DTest[img_num,1]+y_size/2.
# DTest = our images isomap-transformed into 2D. But we still want
# to plot the original image, so we look to the original, untouched
# dataset (at index) to get the pixels:
        img = df.iloc[index,:].values.reshape(num_pixels, num_pixels)  # .values so reshape works on newer pandas
ax.imshow(img, aspect='auto', cmap=plt.cm.gray, interpolation='nearest', zorder=100000, extent=(x0, x1, y0, y1), alpha=0.8)
img_num += 1
# Plot your TRAINING points as well... as points rather than as images
for label in range(len(np.unique(LTrain))):
indices = np.where(LTrain == label)
ax.scatter(DTrain[indices, 0], DTrain[indices, 1], c=colors[label], alpha=0.8, marker='o')
# Plot
plt.show()
#
# TODO: Use the same code from Module4/assignment4.py to load up the
# face_data.mat in a dataset called "df". Be sure to calculate the
# num_pixels value, and to rotate the images to being right-side-up
# instead of sideways. This was demonstrated in the M4/A4 code:
#
# .. your code here ..
mat = scipy.io.loadmat('C:/Data/Projektit/DAT210x/Module4/Datasets/face_data.mat')
df = pd.DataFrame(mat['images']).T
num_images, num_pixels = df.shape
num_pixels = int(math.sqrt(num_pixels))
# Rotate the pictures, so we don't have to crane our necks:
for i in range(num_images):
    df.loc[i,:] = df.loc[i,:].values.reshape(num_pixels, num_pixels).T.reshape(-1)
#
# TODO: Load up your face_labels dataset. It only has a single column, and
# you're only interested in that single column. You will have to slice the
# column out so that you have access to it as a "Series" rather than as a
# "Dataframe". Use an appropriate indexer to take care of that. Also print
# out the labels and compare to the face_labels.csv file to ensure you
# loaded it correctly
#
# .. your code here ..
labels = pd.read_csv('Datasets/face_labels.csv', header=None)
face_labels = labels.iloc[:, 0]  # .iloc replaces the deprecated .ix indexer
#
# TODO: Do train_test_split. Use the same code as on the EdX platform in the
# reading material, but set the random_state=7 for reproducibility, and play
# around with the test_size from 0.10 - 0.20 (10-20%). Your labels are actually
# passed in as a series (instead of as an NDArray) so that you can access
# their underlying indices later on. This is necessary so you can find your samples
# in the original dataframe, which you will use to plot your testing data as images
# rather than as points:
#
# .. your code here ..
trainingData, testData, label_train, label_test = train_test_split(df, face_labels, random_state=7, test_size=0.10)
if Test_PCA:
# INFO: PCA is used *before* KNeighbors to simplify your high dimensionality
# image samples down to just 2 principal components! A lot of information
# (variance) is lost during the process, as I'm sure you can imagine. But
# you have to drop the dimension down to two, otherwise you wouldn't be able
# to visualize a 2D decision surface / boundary. In the wild, you'd probably
# leave in a lot more dimensions, but wouldn't need to plot the boundary;
# simply checking the results would suffice.
#
    # Your model should only be trained (fit) against the training data (data_train).
    # Once you've done this, you need to use the model to transform both data_train
# and data_test from their original high-D image feature space, down to 2D
#
#
# TODO: Implement PCA here. ONLY train against your training data, but
# transform both your training + test data, storing the results back into
# data_train, and data_test.
#
# .. your code here ..
pca_model = PCA(n_components=2).fit(trainingData)
data_train = pca_model.transform(trainingData)
data_test = pca_model.transform(testData)
else:
# INFO: Isomap is used *before* KNeighbors to simplify your high dimensionality
    # image samples down to just 2 components! A lot of information is
# lost during the process, as I'm sure you can imagine. But if you have
# non-linear data that can be represented on a 2D manifold, you probably will
# be left with a far superior dataset to use for classification. Plus by
# having the images in 2D space, you can plot them as well as visualize a 2D
# decision surface / boundary. In the wild, you'd probably leave in a lot
# more dimensions, but wouldn't need to plot the boundary; simply checking
# the results would suffice.
#
    # Your model should only be trained (fit) against the training data (data_train).
    # Once you've done this, you need to use the model to transform both data_train
# and data_test from their original high-D image feature space, down to 2D
#
# TODO: Implement Isomap here. ONLY train against your training data, but
# transform both your training + test data, storing the results back into
# data_train, and data_test.
#
# .. your code here ..
isomap_model = Isomap(n_components=2).fit(trainingData)
    data_train = isomap_model.transform(trainingData)
data_test = isomap_model.transform(testData)
#
# TODO: Implement KNeighborsClassifier here. You can use any K value from 1
# through 20, so play around with it and attempt to get good accuracy.
# This is the heart of this assignment: Looking at the 2D points that
# represent your images, along with a list of "answers" or correct class
# labels that those 2d representations should be.
#
# .. your code here ..
naapureita = 20  # number of neighbors ("naapureita" is Finnish for "neighbors")
model = KNeighborsClassifier(n_neighbors=naapureita).fit(data_train, label_train)
# NOTE: K-NEIGHBORS DOES NOT CARE WHAT THE ANSWERS SHOULD BE! In fact, it
# just tosses that information away. All KNeighbors cares about storing is
# your training data (data_train) so that later on when you attempt to
# predict or score samples, it can derive a class for them based on the
# labeling of the sample's near neighbors.
#
# TODO: Calculate + Print the accuracy of the testing set (data_test and
# label_test).
#
# .. your code here ..
print model.score(data_test, label_test)
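# Optional sketch (an addition, not part of the assignment template): scan the
# allowed K range (1-20) and report the best-scoring value on the held-out split.
# It only reuses data_train/data_test built above; flip SEARCH_K to True to run it.
SEARCH_K = False
if SEARCH_K:
    best_k, best_score = None, -1.0
    for k in range(1, 21):
        knn = KNeighborsClassifier(n_neighbors=k).fit(data_train, label_train)
        s = knn.score(data_test, label_test)
        if s > best_score:
            best_k, best_score = k, s
    print "Best K: %d (accuracy %.3f)" % (best_k, best_score)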
# Chart the combined decision boundary, the training data as 2D plots, and
# the testing data as small images so we can visually validate performance.
Plot2DBoundary(data_train, label_train, data_test, label_test)
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Whitebox tests for TCP APIs.
"""
from __future__ import division, absolute_import
import errno, socket, os
try:
import resource
except ImportError:
resource = None
from twisted.trial.unittest import TestCase
from twisted.python import log
from twisted.internet.tcp import ECONNABORTED, ENOMEM, ENFILE, EMFILE, ENOBUFS, EINPROGRESS, Port
from twisted.internet.protocol import ServerFactory
from twisted.python.runtime import platform
from twisted.internet.defer import maybeDeferred, gatherResults
from twisted.internet import reactor, interfaces
class PlatformAssumptionsTests(TestCase):
"""
Test assumptions about platform behaviors.
"""
socketLimit = 8192
def setUp(self):
self.openSockets = []
if resource is not None:
# On some buggy platforms we might leak FDs, and the test will
# fail creating the initial two sockets we *do* want to
# succeed. So, we make the soft limit the current number of fds
# plus two more (for the two sockets we want to succeed). If we've
# leaked too many fds for that to work, there's nothing we can
# do.
from twisted.internet.process import _listOpenFDs
newLimit = len(_listOpenFDs()) + 2
self.originalFileLimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (newLimit, self.originalFileLimit[1]))
self.socketLimit = newLimit + 100
def tearDown(self):
while self.openSockets:
self.openSockets.pop().close()
if resource is not None:
# OS X implicitly lowers the hard limit in the setrlimit call
# above. Retrieve the new hard limit to pass in to this
# setrlimit call, so that it doesn't give us a permission denied
# error.
currentHardLimit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
newSoftLimit = min(self.originalFileLimit[0], currentHardLimit)
resource.setrlimit(resource.RLIMIT_NOFILE, (newSoftLimit, currentHardLimit))
def socket(self):
"""
Create and return a new socket object, also tracking it so it can be
closed in the test tear down.
"""
s = socket.socket()
self.openSockets.append(s)
return s
def test_acceptOutOfFiles(self):
"""
Test that the platform accept(2) call fails with either L{EMFILE} or
L{ENOBUFS} when there are too many file descriptors open.
"""
# Make a server to which to connect
port = self.socket()
port.bind(('127.0.0.1', 0))
serverPortNumber = port.getsockname()[1]
port.listen(5)
# Make a client to use to connect to the server
client = self.socket()
client.setblocking(False)
# Use up all the rest of the file descriptors.
for i in range(self.socketLimit):
try:
self.socket()
except socket.error as e:
if e.args[0] in (EMFILE, ENOBUFS):
# The desired state has been achieved.
break
else:
# Some unexpected error occurred.
raise
else:
self.fail("Could provoke neither EMFILE nor ENOBUFS from platform.")
# Non-blocking connect is supposed to fail, but this is not true
# everywhere (e.g. freeBSD)
self.assertIn(client.connect_ex(('127.0.0.1', serverPortNumber)),
(0, EINPROGRESS))
# Make sure that the accept call fails in the way we expect.
exc = self.assertRaises(socket.error, port.accept)
self.assertIn(exc.args[0], (EMFILE, ENOBUFS))
if platform.getType() == "win32":
test_acceptOutOfFiles.skip = (
"Windows requires an unacceptably large amount of resources to "
"provoke this behavior in the naive manner.")
class SelectReactorTests(TestCase):
"""
Tests for select-specific failure conditions.
"""
def setUp(self):
self.ports = []
self.messages = []
log.addObserver(self.messages.append)
def tearDown(self):
log.removeObserver(self.messages.append)
return gatherResults([
maybeDeferred(p.stopListening)
for p in self.ports])
def port(self, portNumber, factory, interface):
"""
Create, start, and return a new L{Port}, also tracking it so it can
be stopped in the test tear down.
"""
p = Port(portNumber, factory, interface=interface)
p.startListening()
self.ports.append(p)
return p
def _acceptFailureTest(self, socketErrorNumber):
"""
Test behavior in the face of an exception from C{accept(2)}.
On any exception which indicates the platform is unable or unwilling
to allocate further resources to us, the existing port should remain
listening, a message should be logged, and the exception should not
propagate outward from doRead.
@param socketErrorNumber: The errno to simulate from accept.
"""
class FakeSocket(object):
"""
Pretend to be a socket in an overloaded system.
"""
def accept(self):
raise socket.error(
socketErrorNumber, os.strerror(socketErrorNumber))
factory = ServerFactory()
port = self.port(0, factory, interface='127.0.0.1')
originalSocket = port.socket
try:
port.socket = FakeSocket()
port.doRead()
expectedFormat = "Could not accept new connection (%s)"
expectedErrorCode = errno.errorcode[socketErrorNumber]
expectedMessage = expectedFormat % (expectedErrorCode,)
for msg in self.messages:
if msg.get('message') == (expectedMessage,):
break
else:
self.fail("Log event for failed accept not found in "
"%r" % (self.messages,))
finally:
port.socket = originalSocket
def test_tooManyFilesFromAccept(self):
"""
C{accept(2)} can fail with C{EMFILE} when there are too many open file
descriptors in the process. Test that this doesn't negatively impact
any other existing connections.
C{EMFILE} mainly occurs on Linux when the open file rlimit is
encountered.
"""
return self._acceptFailureTest(EMFILE)
def test_noBufferSpaceFromAccept(self):
"""
Similar to L{test_tooManyFilesFromAccept}, but test the case where
C{accept(2)} fails with C{ENOBUFS}.
This mainly occurs on Windows and FreeBSD, but may be possible on
Linux and other platforms as well.
"""
return self._acceptFailureTest(ENOBUFS)
def test_connectionAbortedFromAccept(self):
"""
Similar to L{test_tooManyFilesFromAccept}, but test the case where
C{accept(2)} fails with C{ECONNABORTED}.
It is not clear whether this is actually possible for TCP
connections on modern versions of Linux.
"""
return self._acceptFailureTest(ECONNABORTED)
def test_noFilesFromAccept(self):
"""
Similar to L{test_tooManyFilesFromAccept}, but test the case where
C{accept(2)} fails with C{ENFILE}.
This can occur on Linux when the system has exhausted (!) its supply
of inodes.
"""
return self._acceptFailureTest(ENFILE)
if platform.getType() == 'win32':
test_noFilesFromAccept.skip = "Windows accept(2) cannot generate ENFILE"
def test_noMemoryFromAccept(self):
"""
Similar to L{test_tooManyFilesFromAccept}, but test the case where
C{accept(2)} fails with C{ENOMEM}.
On Linux at least, this can sensibly occur, even in a Python program
        (which eats memory like no one's business), when memory has become
fragmented or low memory has been filled (d_alloc calls
kmem_cache_alloc calls kmalloc - kmalloc only allocates out of low
memory).
"""
return self._acceptFailureTest(ENOMEM)
if platform.getType() == 'win32':
test_noMemoryFromAccept.skip = "Windows accept(2) cannot generate ENOMEM"
if not interfaces.IReactorFDSet.providedBy(reactor):
    skipMsg = 'This test only applies to reactors that implement IReactorFDSet'
PlatformAssumptionsTests.skip = skipMsg
SelectReactorTests.skip = skipMsg
|
|
# Authors: Danny Sullivan <[email protected]>
# Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import _multinomial_grad_loss_all_samples
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.linear_model.base import make_dataset
from sklearn.linear_model.logistic import _multinomial_loss_grad
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import row_norms
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.datasets import make_blobs, load_iris
from sklearn.base import clone
iris = load_iris()
# this is used for sag classification
def log_dloss(p, y):
z = p * y
# approximately equal and saves the computation of the log
if z > 18.0:
return math.exp(-z) * -y
if z < -18.0:
return -y
return -y / (math.exp(z) + 1.0)
def log_loss(p, y):
return np.mean(np.log(1. + np.exp(-y * p)))
# this is used for sag regression
def squared_dloss(p, y):
return p - y
def squared_loss(p, y):
return np.mean(0.5 * (p - y) * (p - y))
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.
return p
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
sample_weight=None, fit_intercept=True):
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
# sparse data has a fixed decay of .01
if sparse:
decay = .01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
# idx = k
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
sum_gradient += update - gradient_memory[idx]
gradient_memory[idx] = update
if fit_intercept:
intercept_sum_gradient += (gradient -
intercept_gradient_memory[idx])
intercept_gradient_memory[idx] = gradient
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
weights -= step_size * sum_gradient / len(seen)
return weights, intercept
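# Note (added commentary): the reference `sag` above follows the standard SAG
# recursion: it keeps the last gradient seen for each sample in
# `gradient_memory`, maintains their running sum in `sum_gradient`, and steps
# with the average of the stored (regularized) per-sample gradients,
# weights -= step_size * sum_gradient / m, where m is the number of distinct
# samples seen so far.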
def sag_sparse(X, y, step_size, alpha, n_iter=1,
dloss=None, sample_weight=None, sparse=False,
fit_intercept=True):
if step_size * alpha == 1.:
raise ZeroDivisionError("Sparse sag does not handle the case "
"step_size * alpha == 1")
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=np.int)
gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
# sparse data has a fixed decay of .01
if sparse:
decay = .01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
# idx = k
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter
p = (wscale * np.dot(entry, weights)) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
sum_gradient += update - (gradient_memory[idx] * entry)
if fit_intercept:
intercept_sum_gradient += gradient - gradient_memory[idx]
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
gradient_memory[idx] = gradient
wscale *= (1.0 - alpha * step_size)
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = (c_sum[counter - 1] +
step_size / (wscale * len(seen)))
if counter >= 1 and wscale < 1e-9:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
weights *= wscale
return weights, intercept
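# Note (added commentary): `sag_sparse` implements the same recursion with
# just-in-time updates. Weights are kept implicitly scaled by `wscale`, and
# `c_sum` / `last_updated` record how much cumulative step each coordinate
# still owes, so a coordinate is only brought fully up to date when it is
# touched again (or at the very end); this is what keeps the sparse variant
# cheap when most features are untouched on a given sample.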
def get_step_size(X, alpha, fit_intercept, classification=True):
if classification:
return (4.0 / (np.max(np.sum(X * X, axis=1))
+ fit_intercept + 4.0 * alpha))
else:
return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
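# Note (added commentary): `get_step_size` mirrors the usual SAG step sizes:
# 4 / (L + 4 * alpha) for log loss and 1 / (L + alpha) for squared loss, where
# L is max_i ||x_i||^2 (+1 when an intercept is fitted). For example, with rows
# of squared norm at most 25, alpha=1 and no intercept, the classification step
# size would be 4 / (25 + 4) ~= 0.138.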
@ignore_warnings
def test_classifier_matching():
n_samples = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
n_iter = 80
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept)
clf = LogisticRegression(solver="sag", fit_intercept=fit_intercept,
tol=1e-11, C=1. / alpha / n_samples,
max_iter=n_iter, random_state=10)
clf.fit(X, y)
weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=10)
assert_array_almost_equal(intercept, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_regressor_matching():
n_samples = 10
n_features = 5
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
alpha = 1.
n_iter = 100
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha * n_samples, max_iter=n_iter)
clf.fit(X, y)
weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
assert_array_almost_equal(weights1, clf.coef_, decimal=10)
assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf2 = clone(clf1)
clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha, max_iter=n_iter, random_state=42)
clf2 = clone(clf1)
clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
alpha=alpha, max_iter=n_iter, random_state=42)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
"""tests if the sag regressor is computed correctly"""
alpha = .1
n_features = 10
n_samples = 40
max_iter = 50
tol = .000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
alpha=alpha * n_samples, max_iter=max_iter)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights1.ravel(),
decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
#assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
    #assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_get_auto_step_size():
X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
alpha = 1.2
fit_intercept = False
# sum the squares of the second sample because that's the largest
max_squared_sum = 4 + 9 + 16
max_squared_sum_ = row_norms(X, squared=True).max()
assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
for fit_intercept in (True, False):
step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept))
step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
int(fit_intercept))
step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha, "squared",
fit_intercept)
step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
fit_intercept)
assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
assert_almost_equal(step_size_log, step_size_log_, decimal=4)
msg = 'Unknown loss function for SAG solver, got wrong instead of'
assert_raise_message(ValueError, msg, get_auto_step_size,
max_squared_sum_, alpha, "wrong", fit_intercept)
@ignore_warnings
def test_sag_regressor():
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 20
tol = .001
max_iter = 20
alpha = 0.1
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.99)
assert_greater(score2, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.5)
assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
"""tests if the binary classifier is computed correctly"""
alpha = .1
n_samples = 50
n_iter = 50
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
"""tests if the multiclass classifier is computed correctly"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 40
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
"""tests if classifier results match target"""
alpha = .1
n_features = 20
n_samples = 10
tol = .01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = .1
n_samples = 50
n_iter = 20
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: .45, -1: .55}
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
"""tests multiclass with classweights for each class"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 50
class_weight = {0: .45, 1: .55, 2: .75}
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight,
sparse=True)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
assert_raise_message(ValueError,
"This solver needs samples of at least 2 classes "
"in the data",
LogisticRegression(solver='sag').fit,
X, y)
def test_step_size_alpha_error():
X = [[0, 0], [0, 0]]
y = [1, -1]
fit_intercept = False
alpha = 1.
msg = ("Current sag implementation does not handle the case"
" step_size * alpha_scaled == 1")
clf1 = LogisticRegression(solver='sag', C=1. / alpha,
fit_intercept=fit_intercept)
assert_raise_message(ZeroDivisionError, msg, clf1.fit, X, y)
clf2 = Ridge(fit_intercept=fit_intercept, solver='sag', alpha=alpha)
assert_raise_message(ZeroDivisionError, msg, clf2.fit, X, y)
def test_multinomial_loss():
# test if the multinomial loss and gradient computations are consistent
X, y = iris.data, iris.target.astype(np.float64)
n_samples, n_features = X.shape
n_classes = len(np.unique(y))
rng = check_random_state(42)
weights = rng.randn(n_features, n_classes)
intercept = rng.randn(n_classes)
sample_weights = rng.randn(n_samples)
np.abs(sample_weights, sample_weights)
# compute loss and gradient like in multinomial SAG
dataset, _ = make_dataset(X, y, sample_weights, random_state=42)
loss_1, grad_1 = _multinomial_grad_loss_all_samples(dataset, weights,
intercept, n_samples,
n_features, n_classes)
# compute loss and gradient like in multinomial LogisticRegression
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
# comparison
assert_array_almost_equal(grad_1, grad_2)
assert_almost_equal(loss_1, loss_2)
def test_multinomial_loss_ground_truth():
# n_samples, n_features, n_classes = 4, 2, 3
n_classes = 3
X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
y = np.array([0, 1, 2, 0])
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
intercept = np.array([1., 0, -.2])
sample_weights = np.array([0.8, 1, 1, 0.8])
prediction = np.dot(X, weights) + intercept
logsumexp_prediction = logsumexp(prediction, axis=1)
p = prediction - logsumexp_prediction[:, np.newaxis]
loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum()
diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin)
grad_1 = np.dot(X.T, diff)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
assert_almost_equal(loss_1, loss_2)
assert_array_almost_equal(grad_1, grad_2)
# ground truth
loss_gt = 11.680360354325961
grad_gt = np.array([[-0.557487, -1.619151, +2.176638],
[-0.903942, +5.258745, -4.354803]])
assert_almost_equal(loss_1, loss_gt)
assert_array_almost_equal(grad_1, grad_gt)
|
|
# Copyright 2012-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of random helper functionality."""
import stat
import time
import platform, subprocess, operator, os, shutil, re
import collections
from glob import glob
# Put this in objects that should not get dumped to pickle files
# by accident.
import threading
an_unpicklable_object = threading.Lock()
class MesonException(Exception):
'''Exceptions thrown by Meson'''
class EnvironmentException(MesonException):
'''Exceptions thrown while processing and creating the build environment'''
class FileMode:
# The first triad is for owner permissions, the second for group permissions,
# and the third for others (everyone else).
# For the 1st character:
# 'r' means can read
# '-' means not allowed
# For the 2nd character:
# 'w' means can write
# '-' means not allowed
# For the 3rd character:
# 'x' means can execute
# 's' means can execute and setuid/setgid is set (owner/group triads only)
# 'S' means cannot execute and setuid/setgid is set (owner/group triads only)
# 't' means can execute and sticky bit is set ("others" triads only)
# 'T' means cannot execute and sticky bit is set ("others" triads only)
# '-' means none of these are allowed
#
# The meanings of 'rwx' perms is not obvious for directories; see:
# https://www.hackinglinuxexposed.com/articles/20030424.html
#
# For information on this notation such as setuid/setgid/sticky bits, see:
# https://en.wikipedia.org/wiki/File_system_permissions#Symbolic_notation
symbolic_perms_regex = re.compile('[r-][w-][xsS-]' # Owner perms
'[r-][w-][xsS-]' # Group perms
'[r-][w-][xtT-]') # Others perms
def __init__(self, perms=None, owner=None, group=None):
self.perms_s = perms
self.perms = self.perms_s_to_bits(perms)
self.owner = owner
self.group = group
def __repr__(self):
ret = '<FileMode: {!r} owner={} group={}'
return ret.format(self.perms_s, self.owner, self.group)
@classmethod
def perms_s_to_bits(cls, perms_s):
'''
Does the opposite of stat.filemode(), converts strings of the form
'rwxr-xr-x' to st_mode enums which can be passed to os.chmod()
'''
if perms_s is None:
# No perms specified, we will not touch the permissions
return -1
eg = 'rwxr-xr-x'
if not isinstance(perms_s, str):
msg = 'Install perms must be a string. For example, {!r}'
raise MesonException(msg.format(eg))
if len(perms_s) != 9 or not cls.symbolic_perms_regex.match(perms_s):
            msg = 'File perms {!r} must be exactly 9 chars in symbolic form. For example, {!r}'
raise MesonException(msg.format(perms_s, eg))
perms = 0
# Owner perms
if perms_s[0] == 'r':
perms |= stat.S_IRUSR
if perms_s[1] == 'w':
perms |= stat.S_IWUSR
if perms_s[2] == 'x':
perms |= stat.S_IXUSR
elif perms_s[2] == 'S':
perms |= stat.S_ISUID
elif perms_s[2] == 's':
perms |= stat.S_IXUSR
perms |= stat.S_ISUID
# Group perms
if perms_s[3] == 'r':
perms |= stat.S_IRGRP
if perms_s[4] == 'w':
perms |= stat.S_IWGRP
if perms_s[5] == 'x':
perms |= stat.S_IXGRP
elif perms_s[5] == 'S':
perms |= stat.S_ISGID
elif perms_s[5] == 's':
perms |= stat.S_IXGRP
perms |= stat.S_ISGID
# Others perms
if perms_s[6] == 'r':
perms |= stat.S_IROTH
if perms_s[7] == 'w':
perms |= stat.S_IWOTH
if perms_s[8] == 'x':
perms |= stat.S_IXOTH
elif perms_s[8] == 'T':
perms |= stat.S_ISVTX
elif perms_s[8] == 't':
perms |= stat.S_IXOTH
perms |= stat.S_ISVTX
return perms
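# Example (illustrative): FileMode.perms_s_to_bits('rw-r--r--') yields the same
# bits as 0o644, while 'rwsr-xr-x' additionally sets stat.S_ISUID on top of
# 0o755; passing None returns -1, meaning permissions are left untouched.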
class File:
def __init__(self, is_built, subdir, fname):
self.is_built = is_built
self.subdir = subdir
self.fname = fname
assert(isinstance(self.subdir, str))
assert(isinstance(self.fname, str))
def __str__(self):
return self.relative_name()
def __repr__(self):
ret = '<File: {0}'
if not self.is_built:
ret += ' (not built)'
ret += '>'
return ret.format(self.relative_name())
@staticmethod
def from_source_file(source_root, subdir, fname):
if not os.path.isfile(os.path.join(source_root, subdir, fname)):
raise MesonException('File %s does not exist.' % fname)
return File(False, subdir, fname)
@staticmethod
def from_built_file(subdir, fname):
return File(True, subdir, fname)
@staticmethod
def from_absolute_file(fname):
return File(False, '', fname)
def rel_to_builddir(self, build_to_src):
if self.is_built:
return self.relative_name()
else:
return os.path.join(build_to_src, self.subdir, self.fname)
def absolute_path(self, srcdir, builddir):
absdir = srcdir
if self.is_built:
absdir = builddir
return os.path.join(absdir, self.relative_name())
def endswith(self, ending):
return self.fname.endswith(ending)
def split(self, s):
return self.fname.split(s)
def __eq__(self, other):
return (self.fname, self.subdir, self.is_built) == (other.fname, other.subdir, other.is_built)
def __hash__(self):
return hash((self.fname, self.subdir, self.is_built))
def relative_name(self):
return os.path.join(self.subdir, self.fname)
def get_meson_script(env, script):
'''
Given the path of `meson.py`/`meson`, get the path of a meson script such
as `mesonintrospect` or `mesontest`.
'''
meson_py = env.get_build_command()
(base, ext) = os.path.splitext(meson_py)
return os.path.join(os.path.dirname(base), script + ext)
def get_compiler_for_source(compilers, src):
for comp in compilers:
if comp.can_compile(src):
return comp
raise RuntimeError('No specified compiler can handle file {!s}'.format(src))
def classify_unity_sources(compilers, sources):
compsrclist = {}
for src in sources:
comp = get_compiler_for_source(compilers, src)
if comp not in compsrclist:
compsrclist[comp] = [src]
else:
compsrclist[comp].append(src)
return compsrclist
def flatten(item):
if not isinstance(item, list):
return [item]
result = []
for i in item:
if isinstance(i, list):
result += flatten(i)
else:
result.append(i)
return result
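# Example (illustrative): flatten(['a', ['b', ['c']], 'd']) == ['a', 'b', 'c', 'd'],
# and flatten('a') == ['a'] since non-list inputs are wrapped in a list.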
def is_osx():
return platform.system().lower() == 'darwin'
def is_linux():
return platform.system().lower() == 'linux'
def is_windows():
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
def is_cygwin():
platname = platform.system().lower()
return platname.startswith('cygwin')
def is_debianlike():
return os.path.isfile('/etc/debian_version')
def exe_exists(arglist):
try:
p = subprocess.Popen(arglist, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
return True
except FileNotFoundError:
pass
return False
def detect_vcs(source_dir):
vcs_systems = [
dict(name = 'git', cmd = 'git', repo_dir = '.git', get_rev = 'git describe --dirty=+', rev_regex = '(.*)', dep = '.git/logs/HEAD'),
dict(name = 'mercurial', cmd = 'hg', repo_dir = '.hg', get_rev = 'hg id -i', rev_regex = '(.*)', dep = '.hg/dirstate'),
dict(name = 'subversion', cmd = 'svn', repo_dir = '.svn', get_rev = 'svn info', rev_regex = 'Revision: (.*)', dep = '.svn/wc.db'),
dict(name = 'bazaar', cmd = 'bzr', repo_dir = '.bzr', get_rev = 'bzr revno', rev_regex = '(.*)', dep = '.bzr'),
]
segs = source_dir.replace('\\', '/').split('/')
for i in range(len(segs), -1, -1):
curdir = '/'.join(segs[:i])
for vcs in vcs_systems:
if os.path.isdir(os.path.join(curdir, vcs['repo_dir'])) and shutil.which(vcs['cmd']):
vcs['wc_dir'] = curdir
return vcs
return None
def grab_leading_numbers(vstr, strict=False):
result = []
for x in vstr.split('.'):
try:
result.append(int(x))
except ValueError as e:
if strict:
msg = 'Invalid version to compare against: {!r}; only ' \
'numeric digits separated by "." are allowed: ' + str(e)
raise MesonException(msg.format(vstr))
break
return result
numpart = re.compile('[0-9.]+')
def version_compare(vstr1, vstr2, strict=False):
match = numpart.match(vstr1.strip())
if match is None:
msg = 'Uncomparable version string {!r}.'
raise MesonException(msg.format(vstr1))
vstr1 = match.group(0)
if vstr2.startswith('>='):
cmpop = operator.ge
vstr2 = vstr2[2:]
elif vstr2.startswith('<='):
cmpop = operator.le
vstr2 = vstr2[2:]
elif vstr2.startswith('!='):
cmpop = operator.ne
vstr2 = vstr2[2:]
elif vstr2.startswith('=='):
cmpop = operator.eq
vstr2 = vstr2[2:]
elif vstr2.startswith('='):
cmpop = operator.eq
vstr2 = vstr2[1:]
elif vstr2.startswith('>'):
cmpop = operator.gt
vstr2 = vstr2[1:]
elif vstr2.startswith('<'):
cmpop = operator.lt
vstr2 = vstr2[1:]
else:
cmpop = operator.eq
varr1 = grab_leading_numbers(vstr1, strict)
varr2 = grab_leading_numbers(vstr2, strict)
return cmpop(varr1, varr2)
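# Example (illustrative): version_compare('1.2.3', '>=1.2') is True and
# version_compare('1.2.3', '<1.2') is False; with no operator prefix the
# comparison defaults to equality on the leading numeric components.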
def version_compare_many(vstr1, conditions):
if not isinstance(conditions, (list, tuple, frozenset)):
conditions = [conditions]
found = []
not_found = []
for req in conditions:
if not version_compare(vstr1, req, strict=True):
not_found.append(req)
else:
found.append(req)
return not_found == [], not_found, found
def default_libdir():
if is_debianlike():
try:
pc = subprocess.Popen(['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
(stdo, _) = pc.communicate()
if pc.returncode == 0:
archpath = stdo.decode().strip()
return 'lib/' + archpath
except Exception:
pass
if os.path.isdir('/usr/lib64') and not os.path.islink('/usr/lib64'):
return 'lib64'
return 'lib'
def default_libexecdir():
# There is no way to auto-detect this, so it must be set at build time
return 'libexec'
def default_prefix():
return 'c:/' if is_windows() else '/usr/local'
def get_library_dirs():
if is_windows():
return ['C:/mingw/lib'] # Fixme
if is_osx():
return ['/usr/lib'] # Fix me as well.
# The following is probably Debian/Ubuntu specific.
# /usr/local/lib is first because it contains stuff
# installed by the sysadmin and is probably more up-to-date
# than /usr/lib. If you feel that this search order is
# problematic, please raise the issue on the mailing list.
unixdirs = ['/usr/local/lib', '/usr/lib', '/lib']
plat = subprocess.check_output(['uname', '-m']).decode().strip()
# This is a terrible hack. I admit it and I'm really sorry.
# I just don't know what the correct solution is.
if plat == 'i686':
plat = 'i386'
if plat.startswith('arm'):
plat = 'arm'
unixdirs += glob('/usr/lib/' + plat + '*')
if os.path.exists('/usr/lib64'):
unixdirs.append('/usr/lib64')
unixdirs += glob('/lib/' + plat + '*')
if os.path.exists('/lib64'):
unixdirs.append('/lib64')
return unixdirs
def do_replacement(regex, line, confdata):
match = re.search(regex, line)
missing_variables = set()
while match:
varname = match.group(1)
if varname in confdata:
(var, desc) = confdata.get(varname)
if isinstance(var, str):
pass
elif isinstance(var, int):
var = str(var)
else:
raise RuntimeError('Tried to replace a variable with something other than a string or int.')
else:
missing_variables.add(varname)
var = ''
line = line.replace('@' + varname + '@', var)
match = re.search(regex, line)
return line, missing_variables
def do_mesondefine(line, confdata):
arr = line.split()
if len(arr) != 2:
        raise MesonException('#mesondefine does not contain exactly two tokens: %s' % line.strip())
varname = arr[1]
try:
(v, desc) = confdata.get(varname)
except KeyError:
return '/* #undef %s */\n' % varname
if isinstance(v, bool):
if v:
return '#define %s\n' % varname
else:
return '#undef %s\n' % varname
elif isinstance(v, int):
return '#define %s %d\n' % (varname, v)
elif isinstance(v, str):
return '#define %s %s\n' % (varname, v)
else:
raise MesonException('#mesondefine argument "%s" is of unknown type.' % varname)
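# Example (illustrative): with confdata mapping 'HAVE_FOO' to (True, ...),
# do_mesondefine('#mesondefine HAVE_FOO', confdata) returns '#define HAVE_FOO\n';
# a name missing from confdata instead yields a '/* #undef NAME */' line.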
def do_conf_file(src, dst, confdata):
try:
with open(src, encoding='utf-8') as f:
data = f.readlines()
except Exception as e:
raise MesonException('Could not read input file %s: %s' % (src, str(e)))
# Only allow (a-z, A-Z, 0-9, _, -) as valid characters for a define
# Also allow escaping '@' with '\@'
regex = re.compile(r'[^\\]?@([-a-zA-Z0-9_]+)@')
result = []
missing_variables = set()
for line in data:
if line.startswith('#mesondefine'):
line = do_mesondefine(line, confdata)
else:
line, missing = do_replacement(regex, line, confdata)
missing_variables.update(missing)
result.append(line)
dst_tmp = dst + '~'
with open(dst_tmp, 'w', encoding='utf-8') as f:
f.writelines(result)
shutil.copymode(src, dst_tmp)
replace_if_different(dst, dst_tmp)
return missing_variables
def dump_conf_header(ofilename, cdata):
with open(ofilename, 'w', encoding='utf-8') as ofile:
ofile.write('''/*
* Autogenerated by the Meson build system.
* Do not edit, your changes will be lost.
*/
#pragma once
''')
for k in sorted(cdata.keys()):
(v, desc) = cdata.get(k)
if desc:
ofile.write('/* %s */\n' % desc)
if isinstance(v, bool):
if v:
ofile.write('#define %s\n\n' % k)
else:
ofile.write('#undef %s\n\n' % k)
elif isinstance(v, (int, str)):
ofile.write('#define %s %s\n\n' % (k, v))
else:
raise MesonException('Unknown data type in configuration file entry: ' + k)
def replace_if_different(dst, dst_tmp):
# If contents are identical, don't touch the file to prevent
# unnecessary rebuilds.
different = True
try:
with open(dst, 'r') as f1, open(dst_tmp, 'r') as f2:
if f1.read() == f2.read():
different = False
except FileNotFoundError:
pass
if different:
os.replace(dst_tmp, dst)
else:
os.unlink(dst_tmp)
def typeslistify(item, types):
'''
Ensure that type(@item) is one of @types or a
list of items all of which are of type @types
'''
if isinstance(item, types):
item = [item]
if not isinstance(item, list):
raise MesonException('Item must be a list or one of {!r}'.format(types))
for i in item:
if i is not None and not isinstance(i, types):
raise MesonException('List item must be one of {!r}'.format(types))
return item
def stringlistify(item):
return typeslistify(item, str)
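# Example (illustrative): stringlistify('foo') == ['foo'] and
# stringlistify(['a', 'b']) == ['a', 'b']; anything that is neither a string
# nor a list of strings raises MesonException.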
def expand_arguments(args):
    expanded_args = []
    for arg in args:
        if not arg.startswith('@'):
            expanded_args.append(arg)
            continue
        args_file = arg[1:]
        try:
            with open(args_file) as f:
                extended_args = f.read().split()
            expanded_args += extended_args
        except Exception as e:
            print('Error expanding command line arguments, %s not found' % args_file)
            print(e)
            return None
    return expanded_args
def Popen_safe(args, write=None, stderr=subprocess.PIPE, **kwargs):
p = subprocess.Popen(args, universal_newlines=True,
close_fds=False,
stdout=subprocess.PIPE,
stderr=stderr, **kwargs)
o, e = p.communicate(write)
return p, o, e
def commonpath(paths):
'''
For use on Python 3.4 where os.path.commonpath is not available.
We currently use it everywhere so this receives enough testing.
'''
# XXX: Replace me with os.path.commonpath when we start requiring Python 3.5
import pathlib
if not paths:
raise ValueError('arg is an empty sequence')
common = pathlib.PurePath(paths[0])
for path in paths[1:]:
new = []
path = pathlib.PurePath(path)
for c, p in zip(common.parts, path.parts):
if c != p:
break
new.append(c)
# Don't convert '' into '.'
if not new:
common = ''
break
new = os.path.join(*new)
common = pathlib.PurePath(new)
return str(common)
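# Example (illustrative, POSIX-style paths):
# commonpath(['/usr/local/lib', '/usr/lib']) == '/usr'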
def iter_regexin_iter(regexiter, initer):
'''
Takes each regular expression in @regexiter and tries to search for it in
every item in @initer. If there is a match, returns that match.
Else returns False.
'''
for regex in regexiter:
for ii in initer:
if not isinstance(ii, str):
continue
match = re.search(regex, ii)
if match:
return match.group()
return False
def _substitute_values_check_errors(command, values):
# Error checking
inregex = ('@INPUT([0-9]+)?@', '@PLAINNAME@', '@BASENAME@')
outregex = ('@OUTPUT([0-9]+)?@', '@OUTDIR@')
if '@INPUT@' not in values:
# Error out if any input-derived templates are present in the command
match = iter_regexin_iter(inregex, command)
if match:
m = 'Command cannot have {!r}, since no input files were specified'
raise MesonException(m.format(match))
else:
if len(values['@INPUT@']) > 1:
# Error out if @PLAINNAME@ or @BASENAME@ is present in the command
match = iter_regexin_iter(inregex[1:], command)
if match:
raise MesonException('Command cannot have {!r} when there is '
'more than one input file'.format(match))
# Error out if an invalid @INPUTnn@ template was specified
for each in command:
if not isinstance(each, str):
continue
match = re.search(inregex[0], each)
if match and match.group() not in values:
m = 'Command cannot have {!r} since there are only {!r} inputs'
raise MesonException(m.format(match.group(), len(values['@INPUT@'])))
if '@OUTPUT@' not in values:
# Error out if any output-derived templates are present in the command
match = iter_regexin_iter(outregex, command)
if match:
m = 'Command cannot have {!r} since there are no outputs'
raise MesonException(m.format(match))
else:
# Error out if an invalid @OUTPUTnn@ template was specified
for each in command:
if not isinstance(each, str):
continue
match = re.search(outregex[0], each)
if match and match.group() not in values:
m = 'Command cannot have {!r} since there are only {!r} outputs'
raise MesonException(m.format(match.group(), len(values['@OUTPUT@'])))
def substitute_values(command, values):
'''
Substitute the template strings in the @values dict into the list of
strings @command and return a new list. For a full list of the templates,
see get_filenames_templates_dict()
If multiple inputs/outputs are given in the @values dictionary, we
substitute @INPUT@ and @OUTPUT@ only if they are the entire string, not
just a part of it, and in that case we substitute *all* of them.
'''
# Error checking
_substitute_values_check_errors(command, values)
# Substitution
outcmd = []
for vv in command:
if not isinstance(vv, str):
outcmd.append(vv)
elif '@INPUT@' in vv:
inputs = values['@INPUT@']
if vv == '@INPUT@':
outcmd += inputs
elif len(inputs) == 1:
outcmd.append(vv.replace('@INPUT@', inputs[0]))
else:
raise MesonException("Command has '@INPUT@' as part of a "
"string and more than one input file")
elif '@OUTPUT@' in vv:
outputs = values['@OUTPUT@']
if vv == '@OUTPUT@':
outcmd += outputs
elif len(outputs) == 1:
outcmd.append(vv.replace('@OUTPUT@', outputs[0]))
else:
raise MesonException("Command has '@OUTPUT@' as part of a "
"string and more than one output file")
# Append values that are exactly a template string.
# This is faster than a string replace.
elif vv in values:
outcmd.append(values[vv])
# Substitute everything else with replacement
else:
for key, value in values.items():
if key in ('@INPUT@', '@OUTPUT@'):
# Already done above
continue
vv = vv.replace(key, value)
outcmd.append(vv)
return outcmd
def get_filenames_templates_dict(inputs, outputs):
'''
Create a dictionary with template strings as keys and values as values for
the following templates:
@INPUT@ - the full path to one or more input files, from @inputs
@OUTPUT@ - the full path to one or more output files, from @outputs
@OUTDIR@ - the full path to the directory containing the output files
If there is only one input file, the following keys are also created:
@PLAINNAME@ - the filename of the input file
@BASENAME@ - the filename of the input file with the extension removed
If there is more than one input file, the following keys are also created:
@INPUT0@, @INPUT1@, ... one for each input file
If there is more than one output file, the following keys are also created:
@OUTPUT0@, @OUTPUT1@, ... one for each output file
'''
values = {}
# Gather values derived from the input
if inputs:
# We want to substitute all the inputs.
values['@INPUT@'] = inputs
for (ii, vv) in enumerate(inputs):
# Write out @INPUT0@, @INPUT1@, ...
values['@INPUT{}@'.format(ii)] = vv
if len(inputs) == 1:
# Just one value, substitute @PLAINNAME@ and @BASENAME@
values['@PLAINNAME@'] = plain = os.path.split(inputs[0])[1]
values['@BASENAME@'] = os.path.splitext(plain)[0]
if outputs:
# Gather values derived from the outputs, similar to above.
values['@OUTPUT@'] = outputs
for (ii, vv) in enumerate(outputs):
values['@OUTPUT{}@'.format(ii)] = vv
# Outdir should be the same for all outputs
values['@OUTDIR@'] = os.path.split(outputs[0])[0]
# Many external programs fail on empty arguments.
if values['@OUTDIR@'] == '':
values['@OUTDIR@'] = '.'
return values
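# Illustrative sketch (not part of the original module): how the dict produced
# by get_filenames_templates_dict() feeds substitute_values(). The file names
# are made up for the example and the helper is never called.
def _demo_filename_templates():
    values = get_filenames_templates_dict(['foo/in.c'], ['out/in.o'])
    # @PLAINNAME@ and @BASENAME@ exist because there is exactly one input.
    assert values['@PLAINNAME@'] == 'in.c'
    assert values['@BASENAME@'] == 'in'
    cmd = ['cc', '-c', '@INPUT@', '-o', '@OUTPUT@', '--dep=@BASENAME@.d']
    assert substitute_values(cmd, values) == \
        ['cc', '-c', 'foo/in.c', '-o', 'out/in.o', '--dep=in.d']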
def windows_proof_rmtree(f):
    # On Windows you can't delete a file while anyone else is holding it
    # open; for example, an antivirus scanner might be scanning the files
    # you are trying to delete. The only way to work around this is to
    # retry again and again.
delays = [0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2]
for d in delays:
try:
shutil.rmtree(f)
return
except (OSError, PermissionError):
time.sleep(d)
# Try one last time and throw if it fails.
shutil.rmtree(f)
def unholder_array(entries):
result = []
for e in entries:
if hasattr(e, 'held_object'):
e = e.held_object
result.append(e)
return result
class OrderedSet(collections.MutableSet):
"""A set that preserves the order in which items are added, by first
insertion.
"""
def __init__(self, iterable=None):
self.__container = collections.OrderedDict()
if iterable:
self.update(iterable)
def __contains__(self, value):
return value in self.__container
def __iter__(self):
return iter(self.__container.keys())
def __len__(self):
return len(self.__container)
def __repr__(self):
# Don't print 'OrderedSet("")' for an empty set.
if self.__container:
return 'OrderedSet("{}")'.format(
'", "'.join(repr(e) for e in self.__container.keys()))
return 'OrderedSet()'
def add(self, value):
self.__container[value] = None
def discard(self, value):
if value in self.__container:
del self.__container[value]
def update(self, iterable):
for item in iterable:
self.__container[item] = None
def difference(self, set_):
return type(self)(e for e in self if e not in set_)
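# Illustrative sketch (not part of the original module): OrderedSet keeps
# first-insertion order and silently drops duplicates. The helper below is
# never called; it only documents the expected behaviour.
def _demo_ordered_set():
    s = OrderedSet(['b', 'a', 'b', 'c'])
    assert list(s) == ['b', 'a', 'c']
    s.add('a')      # already present, so the order is unchanged
    s.discard('b')
    assert list(s) == ['a', 'c']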
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Message.message_id'
db.add_column(
'sentry_message',
'message_id',
self.gf('django.db.models.fields.CharField')(max_length=32, unique=True, null=True),
keep_default=False
)
def backwards(self, orm):
# Deleting field 'Message.message_id'
db.delete_column('sentry_message', 'message_id')
models = {
'sentry.filtervalue': {
'Meta': {
'unique_together': "(('key', 'value'),)",
'object_name': 'FilterValue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.groupedmessage': {
'Meta': {
'unique_together': "(('logger', 'view', 'checksum'),)",
'object_name': 'GroupedMessage'
},
'checksum': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'class_name': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '128',
'null': 'True',
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
}),
'traceback':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'view': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.message': {
'Meta': {
'object_name': 'Message'
},
'checksum': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'class_name': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '128',
'null': 'True',
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'message_set'",
'null': 'True',
'to': "orm['sentry.GroupedMessage']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'message_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'server_name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'db_index': 'True'
}),
'site': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'traceback':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'view': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
)
}
}
complete_apps = ['sentry']
|
|
#!python
# coding=utf-8
import warnings
from collections import OrderedDict
import numpy as np
import simplejson as json
from netCDF4 import Dataset
from .utils import (
generic_masked,
JSONEncoder,
safe_attribute_typing,
safe_issubdtype
)
from .meta import (
MetaInterface,
ncpyattributes,
string_to_dtype,
untype_attributes
)
from . import logger as L
# Attributes that need to be of the same type as the variable
_TYPE_SENSITIVE_ATTRIBUTES = [
'_FillValue',
'missing_value',
'valid_min',
'valid_max',
'valid_range',
'display_min',
'display_max',
'display_range',
'colorBarMinimum',
'colorBarMaximum',
]
class EnhancedDataset(Dataset):
def __del__(self):
try:
self.close()
except RuntimeError:
pass
def close(self):
if not self.isopen():
return
super(EnhancedDataset, self).close()
def vatts(self, vname):
d = {}
var = self.variables[vname]
for k in var.ncattrs():
d[k] = var.getncattr(k)
return d
def filter_by_attrs(self, *args, **kwargs):
return self.get_variables_by_attributes(*args, **kwargs)
def __apply_meta_interface__(self, meta, **kwargs):
warnings.warn(
'`__apply_meta_interface__` is deprecated. Use `apply_meta()` instead',
DeprecationWarning
)
return self.apply_meta(meta, **kwargs)
def __getattr__(self, name):
if name in ['__meta_interface__', '_meta']:
warnings.warn(
'`__meta_interface__` and `_meta` are deprecated. Use `meta()` instead',
DeprecationWarning
)
return self.meta()
else:
return super().__getattr__(name)
def apply_meta(self, *args, **kwargs):
""" Shortcut to the JSON object without writing any data"""
kwargs['create_data'] = False
return self.apply_json(*args, **kwargs)
def meta(self, *args, **kwargs):
""" Shortcut to the JSON object without any data"""
kwargs['return_data'] = False
return self.json(*args, **kwargs)
def json(self, return_data=True, fill_data=True):
ds = OrderedDict()
vs = OrderedDict()
gs = ncpyattributes({ ga: self.getncattr(ga) for ga in self.ncattrs() })
# Dimensions
for dname, dim in self.dimensions.items():
if dim.isunlimited():
ds[dname] = None
else:
ds[dname] = dim.size
# Variables
for k, v in self.variables.items():
typed = v.dtype
if isinstance(typed, np.dtype):
typed = str(typed.name)
elif isinstance(typed, type):
typed = typed.__name__
vattrs = { va: v.getncattr(va) for va in v.ncattrs() }
vardict = {
'attributes': ncpyattributes(vattrs),
'shape': v.dimensions,
'type': typed
}
if return_data is True:
vdata = generic_masked(v[:], attrs=vattrs)
if fill_data is True:
vdata = vdata.filled()
vardict['data'] = vdata.tolist()
vs[k] = vardict
return MetaInterface(
dimensions=ds,
variables=vs,
attributes=gs
)
def apply_json(self, meta, create_vars=True, create_dims=True, create_data=True):
"""Apply a meta interface object to a netCDF4 compatible object"""
ds = meta.get('dimensions', OrderedDict())
gs = meta.get('attributes', OrderedDict())
vs = meta.get('variables', OrderedDict())
# Dimensions
for dname, dsize in ds.items():
# Ignore dimension sizes less than 0
if dsize and dsize < 0:
continue
if dname not in self.dimensions:
# Don't create new dimensions
if create_dims is False:
continue
self.createDimension(dname, size=dsize)
else:
dfilesize = self.dimensions[dname].size
if dfilesize != dsize:
L.warning("Not changing size of dimension {}. file: {}, meta: {}".format(
dname, dfilesize, dsize
))
# Global attributes
typed_gs = untype_attributes(gs)
self.setncatts(typed_gs)
# Variables
for vname, vvalue in vs.items():
vatts = untype_attributes(vvalue.get('attributes', {}))
if vname not in self.variables:
# Don't create new variables
if create_vars is False:
continue
if 'shape' not in vvalue and 'type' not in vvalue:
L.debug("Skipping {} creation, no shape or no type defined".format(vname))
continue
shape = vvalue.get('shape', []) # Dimension names
vardtype = string_to_dtype(vvalue.get('type'))
if safe_issubdtype(vardtype, np.floating):
defaultfill = vardtype.type(np.nan) # We can use `nan` for floats
elif vardtype.kind in ['U', 'S']:
defaultfill = None # No fillvalue on VLENs
else:
# Use a masked value which evaluates to different things depending on the dtype
                    # For integers it resolves to `0`.
defaultfill = vardtype.type(np.ma.masked)
fillmiss = vatts.get('_FillValue', vatts.get('missing_value', defaultfill))
newvar = self.createVariable(
vname,
vardtype,
dimensions=shape,
fill_value=fillmiss
)
else:
newvar = self.variables[vname]
            # Now assign the data if it exists
if create_data is True and 'data' in vvalue:
                # Because the JSON data may already be flattened, we always
                # reshape it to the variable's shape
data = generic_masked(
np.array(vvalue['data'], dtype=newvar.dtype).flatten()
).reshape(newvar.shape)
newvar[:] = data
# Don't re-assign fill value attributes
if '_FillValue' in vatts:
del vatts['_FillValue']
if 'missing_value' in vatts:
del vatts['missing_value']
            # Convert any attributes that need to match the variable's dtype to that dtype
for sattr in _TYPE_SENSITIVE_ATTRIBUTES:
if sattr in vatts:
vatts[sattr] = safe_attribute_typing(newvar.dtype, vatts[sattr])
newvar.setncatts(vatts)
def to_json(self, *args, **kwargs):
return json.dumps(self.to_dict(), *args, **kwargs)
def json_attributes(self, vfuncs=None):
"""
vfuncs can be any callable that accepts a single argument, the
Variable object, and returns a dictionary of new attributes to
set. These will overwrite existing attributes
"""
vfuncs = vfuncs or []
js = {'global': {}}
for k in self.ncattrs():
js['global'][k] = self.getncattr(k)
for varname, var in self.variables.items():
js[varname] = {}
for k in var.ncattrs():
z = var.getncattr(k)
try:
assert not np.isnan(z).all()
js[varname][k] = z
except AssertionError:
js[varname][k] = None
except TypeError:
js[varname][k] = z
for vf in vfuncs:
try:
                    js[varname].update(vf(var))
except BaseException:
L.exception("Could not apply custom variable attribute function")
return json.loads(json.dumps(js, cls=JSONEncoder))
def update_attributes(self, attributes):
for k, v in attributes.pop('global', {}).items():
try:
self.setncattr(k, v)
except BaseException:
L.warning('Could not set global attribute {}: {}'.format(k, v))
for k, v in attributes.items():
if k in self.variables:
for n, z in v.items():
# Don't re-assign fill value attributes
if n in ['_FillValue', 'missing_value']:
L.warning('Refusing to set {} on {}'.format(n, k))
continue
try:
self.variables[k].setncattr(n, z)
except BaseException:
L.warning('Could not set attribute {} on {}'.format(n, k))
self.sync()
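# Illustrative sketch (not part of the original module): a minimal round trip
# through EnhancedDataset. The file path, dimension and variable names are all
# made up for the example, and the helper is never called.
def _demo_enhanced_dataset(path='_demo.nc'):
    with EnhancedDataset(path, 'w') as nc:
        nc.createDimension('t', 3)
        v = nc.createVariable('x', 'f8', ('t',))
        v[:] = [1.0, 2.0, 3.0]
        v.setncattr('units', 'm')
        # Header-only view (dimensions, variables, attributes; no data).
        header = nc.meta()
        # Full dump, including the variable data as plain Python lists.
        full = nc.json(return_data=True)
        return header, full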
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception types for TensorFlow errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import traceback
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util import compat
class OpError(Exception):
"""A generic error that is raised when TensorFlow execution fails.
Whenever possible, the session will raise a more specific subclass
of `OpError` from the `tf.errors` module.
@@op
@@node_def
"""
def __init__(self, node_def, op, message, error_code):
"""Creates a new `OpError` indicating that a particular op failed.
Args:
node_def: The `node_def_pb2.NodeDef` proto representing the op that
failed, if known; otherwise None.
op: The `ops.Operation` that failed, if known; otherwise None.
message: The message string describing the failure.
error_code: The `error_codes_pb2.Code` describing the error.
"""
super(OpError, self).__init__()
self._message = message
self._node_def = node_def
self._op = op
self._error_code = error_code
@property
def message(self):
"""The error message that describes the error."""
return self._message
@property
def op(self):
"""The operation that failed, if known.
*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
or `Recv` op, there will be no corresponding
[`Operation`](../../api_docs/python/framework.md#Operation)
object. In that case, this will return `None`, and you should
instead use the [`OpError.node_def`](#OpError.node_def) to
discover information about the op.
Returns:
The `Operation` that failed, or None.
"""
return self._op
@property
def error_code(self):
"""The integer error code that describes the error."""
return self._error_code
@property
def node_def(self):
"""The `NodeDef` proto representing the op that failed."""
return self._node_def
def __str__(self):
if self._op is not None:
output = ["%s\n\nCaused by op %r, defined at:\n" % (self.message,
self._op.name,)]
curr_traceback_list = traceback.format_list(self._op.traceback)
output.extend(curr_traceback_list)
# pylint: disable=protected-access
original_op = self._op._original_op
# pylint: enable=protected-access
while original_op is not None:
output.append(
"\n...which was originally created as op %r, defined at:\n"
% (original_op.name,))
prev_traceback_list = curr_traceback_list
curr_traceback_list = traceback.format_list(original_op.traceback)
# Attempt to elide large common subsequences of the subsequent
# stack traces.
#
# TODO(mrry): Consider computing the actual longest common subsequence.
is_eliding = False
elide_count = 0
last_elided_line = None
for line, line_in_prev in zip(curr_traceback_list, prev_traceback_list):
if line == line_in_prev:
if is_eliding:
elide_count += 1
last_elided_line = line
else:
output.append(line)
is_eliding = True
elide_count = 0
else:
if is_eliding:
if elide_count > 0:
output.extend(
["[elided %d identical lines from previous traceback]\n"
% (elide_count - 1,), last_elided_line])
is_eliding = False
            output.append(line)
# pylint: disable=protected-access
original_op = original_op._original_op
# pylint: enable=protected-access
output.append("\n%s (see above for traceback): %s\n" %
(type(self).__name__, self.message))
return "".join(output)
else:
return self.message
OK = error_codes_pb2.OK
CANCELLED = error_codes_pb2.CANCELLED
UNKNOWN = error_codes_pb2.UNKNOWN
INVALID_ARGUMENT = error_codes_pb2.INVALID_ARGUMENT
DEADLINE_EXCEEDED = error_codes_pb2.DEADLINE_EXCEEDED
NOT_FOUND = error_codes_pb2.NOT_FOUND
ALREADY_EXISTS = error_codes_pb2.ALREADY_EXISTS
PERMISSION_DENIED = error_codes_pb2.PERMISSION_DENIED
UNAUTHENTICATED = error_codes_pb2.UNAUTHENTICATED
RESOURCE_EXHAUSTED = error_codes_pb2.RESOURCE_EXHAUSTED
FAILED_PRECONDITION = error_codes_pb2.FAILED_PRECONDITION
ABORTED = error_codes_pb2.ABORTED
OUT_OF_RANGE = error_codes_pb2.OUT_OF_RANGE
UNIMPLEMENTED = error_codes_pb2.UNIMPLEMENTED
INTERNAL = error_codes_pb2.INTERNAL
UNAVAILABLE = error_codes_pb2.UNAVAILABLE
DATA_LOSS = error_codes_pb2.DATA_LOSS
# pylint: disable=line-too-long
class CancelledError(OpError):
"""Raised when an operation or step is cancelled.
  For example, a long-running operation (e.g.
  [`queue.enqueue()`](../../api_docs/python/io_ops.md#QueueBase.enqueue)) may be
  cancelled by running another operation (e.g.
  [`queue.close(cancel_pending_enqueues=True)`](../../api_docs/python/io_ops.md#QueueBase.close)),
  or by [closing the session](../../api_docs/python/client.md#Session.close).
A step that is running such a long-running operation will fail by raising
`CancelledError`.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `CancelledError`."""
super(CancelledError, self).__init__(node_def, op, message, CANCELLED)
# pylint: enable=line-too-long
class UnknownError(OpError):
"""Unknown error.
An example of where this error may be returned is if a Status value
received from another address space belongs to an error-space that
is not known to this address space. Also errors raised by APIs that
do not return enough error information may be converted to this
error.
@@__init__
"""
def __init__(self, node_def, op, message, error_code=UNKNOWN):
"""Creates an `UnknownError`."""
super(UnknownError, self).__init__(node_def, op, message, error_code)
class InvalidArgumentError(OpError):
"""Raised when an operation receives an invalid argument.
  This may occur, for example, if an operation receives an input
tensor that has an invalid value or shape. For example, the
[`tf.matmul()`](../../api_docs/python/math_ops.md#matmul) op will raise this
error if it receives an input that is not a matrix, and the
[`tf.reshape()`](../../api_docs/python/array_ops.md#reshape) op will raise
this error if the new shape does not match the number of elements in the input
tensor.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InvalidArgumentError`."""
super(InvalidArgumentError, self).__init__(node_def, op, message,
INVALID_ARGUMENT)
class DeadlineExceededError(OpError):
"""Raised when a deadline expires before an operation could complete.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DeadlineExceededError`."""
super(DeadlineExceededError, self).__init__(node_def, op, message,
DEADLINE_EXCEEDED)
class NotFoundError(OpError):
"""Raised when a requested entity (e.g., a file or directory) was not found.
For example, running the
[`tf.WholeFileReader.read()`](../../api_docs/python/io_ops.md#WholeFileReader)
operation could raise `NotFoundError` if it receives the name of a file that
does not exist.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `NotFoundError`."""
super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND)
class AlreadyExistsError(OpError):
"""Raised when an entity that we attempted to create already exists.
For example, running an operation that saves a file
(e.g. [`tf.train.Saver.save()`](../../api_docs/python/train.md#Saver.save))
could potentially raise this exception if an explicit filename for an
existing file was passed.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AlreadyExistsError`."""
super(AlreadyExistsError, self).__init__(node_def, op, message,
ALREADY_EXISTS)
class PermissionDeniedError(OpError):
"""Raised when the caller does not have permission to run an operation.
For example, running the
[`tf.WholeFileReader.read()`](../../api_docs/python/io_ops.md#WholeFileReader)
operation could raise `PermissionDeniedError` if it receives the name of a
file for which the user does not have the read file permission.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `PermissionDeniedError`."""
super(PermissionDeniedError, self).__init__(node_def, op, message,
PERMISSION_DENIED)
class UnauthenticatedError(OpError):
"""The request does not have valid authentication credentials.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnauthenticatedError`."""
super(UnauthenticatedError, self).__init__(node_def, op, message,
UNAUTHENTICATED)
class ResourceExhaustedError(OpError):
"""Some resource has been exhausted.
For example, this error might be raised if a per-user quota is
exhausted, or perhaps the entire file system is out of space.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `ResourceExhaustedError`."""
super(ResourceExhaustedError, self).__init__(node_def, op, message,
RESOURCE_EXHAUSTED)
class FailedPreconditionError(OpError):
"""Operation was rejected because the system is not in a state to execute it.
This exception is most commonly raised when running an operation
that reads a [`tf.Variable`](../../api_docs/python/state_ops.md#Variable)
before it has been initialized.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `FailedPreconditionError`."""
super(FailedPreconditionError, self).__init__(node_def, op, message,
FAILED_PRECONDITION)
class AbortedError(OpError):
"""The operation was aborted, typically due to a concurrent action.
For example, running a
[`queue.enqueue()`](../../api_docs/python/io_ops.md#QueueBase.enqueue)
operation may raise `AbortedError` if a
[`queue.close()`](../../api_docs/python/io_ops.md#QueueBase.close) operation
previously ran.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AbortedError`."""
super(AbortedError, self).__init__(node_def, op, message, ABORTED)
class OutOfRangeError(OpError):
"""Raised when an operation iterates past the valid input range.
This exception is raised in "end-of-file" conditions, such as when a
[`queue.dequeue()`](../../api_docs/python/io_ops.md#QueueBase.dequeue)
operation is blocked on an empty queue, and a
[`queue.close()`](../../api_docs/python/io_ops.md#QueueBase.close)
operation executes.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `OutOfRangeError`."""
super(OutOfRangeError, self).__init__(node_def, op, message,
OUT_OF_RANGE)
class UnimplementedError(OpError):
"""Raised when an operation has not been implemented.
  Some operations may raise this error when passed otherwise-valid
  arguments that they do not currently support. For example, running
the [`tf.nn.max_pool()`](../../api_docs/python/nn.md#max_pool) operation
would raise this error if pooling was requested on the batch dimension,
because this is not yet supported.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnimplementedError`."""
super(UnimplementedError, self).__init__(node_def, op, message,
UNIMPLEMENTED)
class InternalError(OpError):
"""Raised when the system experiences an internal error.
This exception is raised when some invariant expected by the runtime
has been broken. Catching this exception is not recommended.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InternalError`."""
super(InternalError, self).__init__(node_def, op, message, INTERNAL)
class UnavailableError(OpError):
"""Raised when the runtime is currently unavailable.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnavailableError`."""
super(UnavailableError, self).__init__(node_def, op, message,
UNAVAILABLE)
class DataLossError(OpError):
"""Raised when unrecoverable data loss or corruption is encountered.
For example, this may be raised by running a
[`tf.WholeFileReader.read()`](../../api_docs/python/io_ops.md#WholeFileReader)
operation, if the file is truncated while it is being read.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DataLossError`."""
super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS)
_CODE_TO_EXCEPTION_CLASS = {
CANCELLED: CancelledError,
UNKNOWN: UnknownError,
INVALID_ARGUMENT: InvalidArgumentError,
DEADLINE_EXCEEDED: DeadlineExceededError,
NOT_FOUND: NotFoundError,
ALREADY_EXISTS: AlreadyExistsError,
PERMISSION_DENIED: PermissionDeniedError,
UNAUTHENTICATED: UnauthenticatedError,
RESOURCE_EXHAUSTED: ResourceExhaustedError,
FAILED_PRECONDITION: FailedPreconditionError,
ABORTED: AbortedError,
OUT_OF_RANGE: OutOfRangeError,
UNIMPLEMENTED: UnimplementedError,
INTERNAL: InternalError,
UNAVAILABLE: UnavailableError,
DATA_LOSS: DataLossError,
}
_EXCEPTION_CLASS_TO_CODE = dict((
(class_, code) for (code, class_) in _CODE_TO_EXCEPTION_CLASS.items()))
def exception_type_from_error_code(error_code):
return _CODE_TO_EXCEPTION_CLASS[error_code]
def error_code_from_exception_type(cls):
return _EXCEPTION_CLASS_TO_CODE[cls]
def _make_specific_exception(node_def, op, message, error_code):
try:
exc_type = exception_type_from_error_code(error_code)
return exc_type(node_def, op, message)
except KeyError:
warnings.warn("Unknown error code: %d" % error_code)
return UnknownError(node_def, op, message, error_code)
@contextlib.contextmanager
def raise_exception_on_not_ok_status():
status = pywrap_tensorflow.TF_NewStatus()
try:
yield status
if pywrap_tensorflow.TF_GetCode(status) != 0:
raise _make_specific_exception(
None, None,
compat.as_text(pywrap_tensorflow.TF_Message(status)),
pywrap_tensorflow.TF_GetCode(status))
finally:
pywrap_tensorflow.TF_DeleteStatus(status)
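# Illustrative sketch (not part of the TensorFlow API): the two lookup tables
# above are inverses of each other, so error codes and exception classes can
# be converted back and forth, and unknown codes fall back to `UnknownError`.
# The helper below is never called; it only documents the expected behaviour.
def _demo_error_code_round_trip():
  assert exception_type_from_error_code(NOT_FOUND) is NotFoundError
  assert error_code_from_exception_type(NotFoundError) == NOT_FOUND
  assert isinstance(_make_specific_exception(None, None, "msg", -1),
                    UnknownError)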
|
|
#
# Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Lisa Hsu
#
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts
from topologies.Cluster import Cluster
from topologies.Crossbar import Crossbar
class CntrlBase:
_seqs = 0
@classmethod
def seqCount(cls):
        # Use the shared CntrlBase._seqs counter since we need a global count
CntrlBase._seqs += 1
return CntrlBase._seqs - 1
_cntrls = 0
@classmethod
def cntrlCount(cls):
        # Use the shared CntrlBase._cntrls counter since we need a global count
CntrlBase._cntrls += 1
return CntrlBase._cntrls - 1
_version = 0
@classmethod
def versionCount(cls):
cls._version += 1 # Use count for this particular type
return cls._version - 1
class L1Cache(RubyCache):
resourceStalls = False
dataArrayBanks = 2
tagArrayBanks = 2
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class L2Cache(RubyCache):
resourceStalls = False
assoc = 16
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class CPCntrl(CorePair_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1Icache = L1Cache()
self.L1Icache.create(options.l1i_size, options.l1i_assoc, options)
self.L1D0cache = L1Cache()
self.L1D0cache.create(options.l1d_size, options.l1d_assoc, options)
self.L1D1cache = L1Cache()
self.L1D1cache.create(options.l1d_size, options.l1d_assoc, options)
self.L2cache = L2Cache()
self.L2cache.create(options.l2_size, options.l2_assoc, options)
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1Icache
self.sequencer.dcache = self.L1D0cache
self.sequencer.ruby_system = ruby_system
self.sequencer.coreid = 0
self.sequencer.is_cpu_sequencer = True
self.sequencer1 = RubySequencer()
self.sequencer1.version = self.seqCount()
self.sequencer1.icache = self.L1Icache
self.sequencer1.dcache = self.L1D1cache
self.sequencer1.ruby_system = ruby_system
self.sequencer1.coreid = 1
self.sequencer1.is_cpu_sequencer = True
self.issue_latency = options.cpu_to_dir_latency
self.send_evictions = send_evicts(options)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCPCache(RubyCache):
size = "16kB"
assoc = 16
dataArrayBanks = 16 #number of data banks
tagArrayBanks = 16 #number of tag banks
dataAccessLatency = 4
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.tcp_size)
self.assoc = options.tcp_assoc
self.resourceStalls = options.no_tcc_resource_stalls
self.replacement_policy = PseudoLRUReplacementPolicy()
class TCPCntrl(TCP_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency,
dataAccessLatency = options.TCP_latency)
self.L1cache.resourceStalls = options.no_resource_stalls
self.L1cache.create(options)
self.issue_latency = 1
self.coalescer = VIPERCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def createCP(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency,
dataAccessLatency = options.TCP_latency)
self.L1cache.resourceStalls = options.no_resource_stalls
self.L1cache.create(options)
self.issue_latency = 1
self.coalescer = VIPERCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = True
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class SQCCache(RubyCache):
dataArrayBanks = 8
tagArrayBanks = 8
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.sqc_size)
self.assoc = options.sqc_assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class SQCCntrl(SQC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = SQCCache()
self.L1cache.create(options)
self.L1cache.resourceStalls = options.no_resource_stalls
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.support_data_reqs = False
self.sequencer.is_cpu_sequencer = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCC(RubyCache):
size = MemorySize("256kB")
assoc = 16
dataAccessLatency = 8
tagAccessLatency = 2
resourceStalls = True
def create(self, options):
self.assoc = options.tcc_assoc
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
s = options.num_compute_units
tcc_size = s * 128
tcc_size = str(tcc_size)+'kB'
self.size = MemorySize(tcc_size)
self.dataArrayBanks = 64
self.tagArrayBanks = 64
else:
self.size = MemorySize(options.tcc_size)
self.dataArrayBanks = 256 / options.num_tccs #number of data banks
self.tagArrayBanks = 256 / options.num_tccs #number of tag banks
self.size.value = self.size.value / options.num_tccs
if ((self.size.value / long(self.assoc)) < 128):
self.size.value = long(128 * self.assoc)
self.start_index_bit = math.log(options.cacheline_size, 2) + \
math.log(options.num_tccs, 2)
self.replacement_policy = PseudoLRUReplacementPolicy()
class TCCCntrl(TCC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L2cache = TCC()
self.L2cache.create(options)
self.L2cache.resourceStalls = options.no_tcc_resource_stalls
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class L3Cache(RubyCache):
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, options, ruby_system, system):
self.size = MemorySize(options.l3_size)
self.size.value /= options.num_dirs
self.assoc = options.l3_assoc
        self.dataArrayBanks /= options.num_dirs
        self.tagArrayBanks /= options.num_dirs
self.dataAccessLatency = options.l3_data_latency
self.tagAccessLatency = options.l3_tag_latency
self.resourceStalls = False
self.replacement_policy = PseudoLRUReplacementPolicy()
class L3Cntrl(L3Cache_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L3cache = L3Cache()
self.L3cache.create(options, ruby_system, system)
self.l3_response_latency = max(self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
class DirMem(RubyDirectoryMemory, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
phys_mem_size = AddrRange(options.mem_size).size()
mem_module_size = phys_mem_size / options.num_dirs
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
self.size = dir_size
class DirCntrl(Directory_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.response_latency = 30
self.directory = DirMem()
self.directory.create(options, ruby_system, system)
self.L3CacheMemory = L3Cache()
self.L3CacheMemory.create(options, ruby_system, system)
self.l3_hit_latency = max(self.L3CacheMemory.dataAccessLatency,
self.L3CacheMemory.tagAccessLatency)
self.number_of_TBEs = options.num_tbes
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
def define_options(parser):
parser.add_option("--num-subcaches", type = "int", default = 4)
parser.add_option("--l3-data-latency", type = "int", default = 20)
parser.add_option("--l3-tag-latency", type = "int", default = 15)
parser.add_option("--cpu-to-dir-latency", type = "int", default = 120)
parser.add_option("--gpu-to-dir-latency", type = "int", default = 120)
parser.add_option("--no-resource-stalls", action = "store_false",
default = True)
parser.add_option("--no-tcc-resource-stalls", action = "store_false",
default = True)
parser.add_option("--use-L3-on-WT", action = "store_true", default = False)
parser.add_option("--num-tbes", type = "int", default = 256)
parser.add_option("--l2-latency", type = "int", default = 50) # load to use
parser.add_option("--num-tccs", type = "int", default = 1,
help = "number of TCC banks in the GPU")
parser.add_option("--sqc-size", type = 'string', default = '32kB',
help = "SQC cache size")
parser.add_option("--sqc-assoc", type = 'int', default = 8,
help = "SQC cache assoc")
parser.add_option("--WB_L1", action = "store_true", default = False,
help = "writeback L1")
parser.add_option("--WB_L2", action = "store_true", default = False,
help = "writeback L2")
parser.add_option("--TCP_latency", type = "int", default = 4,
help = "TCP latency")
parser.add_option("--TCC_latency", type = "int", default = 16,
help = "TCC latency")
parser.add_option("--tcc-size", type = 'string', default = '256kB',
                      help = "aggregate tcc size")
parser.add_option("--tcc-assoc", type = 'int', default = 16,
help = "tcc assoc")
parser.add_option("--tcp-size", type = 'string', default = '16kB',
help = "tcp size")
parser.add_option("--tcp-assoc", type = 'int', default = 16,
help = "tcp assoc")
parser.add_option("--noL1", action = "store_true", default = False,
                      help = "bypass L1")
def create_system(options, full_system, system, dma_devices, ruby_system):
if buildEnv['PROTOCOL'] != 'GPU_VIPER':
panic("This script requires the GPU_VIPER protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes
# must be listed before the directory nodes and directory nodes before
# dma nodes, etc.
#
cp_cntrl_nodes = []
tcp_cntrl_nodes = []
sqc_cntrl_nodes = []
tcc_cntrl_nodes = []
dir_cntrl_nodes = []
l3_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
# For an odd number of CPUs, still create the right number of controllers
TCC_bits = int(math.log(options.num_tccs, 2))
# This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
# Clusters
crossbar_bw = None
mainCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
#Assuming a 2GHz clock
crossbar_bw = 16 * options.num_compute_units * options.bw_scalor
mainCluster = Cluster(intBW=crossbar_bw)
else:
mainCluster = Cluster(intBW=8) # 16 GB/s
for i in xrange(options.num_dirs):
dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits)
dir_cntrl.create(options, ruby_system, system)
dir_cntrl.number_of_TBEs = options.num_tbes
dir_cntrl.useL3OnWT = options.use_L3_on_WT
# the number_of_TBEs is inclusive of TBEs below
# Connect the Directory controller to the ruby network
dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
dir_cntrl.requestFromCores.slave = ruby_system.network.master
dir_cntrl.responseFromCores = MessageBuffer()
dir_cntrl.responseFromCores.slave = ruby_system.network.master
dir_cntrl.unblockFromCores = MessageBuffer()
dir_cntrl.unblockFromCores.slave = ruby_system.network.master
dir_cntrl.probeToCore = MessageBuffer()
dir_cntrl.probeToCore.master = ruby_system.network.slave
dir_cntrl.responseToCore = MessageBuffer()
dir_cntrl.responseToCore.master = ruby_system.network.slave
dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.responseFromMemory = MessageBuffer()
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
mainCluster.add(dir_cntrl)
cpuCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else:
cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
for i in xrange((options.num_cpus + 1) / 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
exec("ruby_system.cp_cntrl%d = cp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
# Connect the CP controllers and the network
cp_cntrl.requestFromCore = MessageBuffer()
cp_cntrl.requestFromCore.master = ruby_system.network.slave
cp_cntrl.responseFromCore = MessageBuffer()
cp_cntrl.responseFromCore.master = ruby_system.network.slave
cp_cntrl.unblockFromCore = MessageBuffer()
cp_cntrl.unblockFromCore.master = ruby_system.network.slave
cp_cntrl.probeToCore = MessageBuffer()
cp_cntrl.probeToCore.slave = ruby_system.network.master
cp_cntrl.responseToCore = MessageBuffer()
cp_cntrl.responseToCore.slave = ruby_system.network.master
cp_cntrl.mandatoryQueue = MessageBuffer()
cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
cpuCluster.add(cp_cntrl)
gpuCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else:
gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
for i in xrange(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
number_of_TBEs = 2560)
# TBEs set to max outstanding requests
tcp_cntrl.create(options, ruby_system, system)
tcp_cntrl.WB = options.WB_L1
tcp_cntrl.disableL1 = options.noL1
tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency
exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.coalescer)
tcp_cntrl_nodes.append(tcp_cntrl)
# Connect the TCP controller to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer()
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
for i in xrange(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# Connect the SQC controller to the ruby network
sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
sqc_cntrl.probeToSQC.slave = ruby_system.network.master
sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
sqc_cntrl.responseToSQC.slave = ruby_system.network.master
sqc_cntrl.mandatoryQueue = MessageBuffer()
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_cp):
tcp_ID = options.num_compute_units + i
sqc_ID = options.num_sqc + i
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
number_of_TBEs = 2560)
# TBEs set to max outstanding requests
tcp_cntrl.createCP(options, ruby_system, system)
tcp_cntrl.WB = options.WB_L1
tcp_cntrl.disableL1 = options.noL1
tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency
exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % tcp_ID)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.sequencer)
tcp_cntrl_nodes.append(tcp_cntrl)
# Connect the CP (TCP) controllers to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % sqc_ID)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_tccs):
tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency)
tcc_cntrl.create(options, ruby_system, system)
tcc_cntrl.l2_request_latency = options.gpu_to_dir_latency
tcc_cntrl.l2_response_latency = options.TCC_latency
tcc_cntrl_nodes.append(tcc_cntrl)
tcc_cntrl.WB = options.WB_L2
tcc_cntrl.number_of_TBEs = 2560 * options.num_compute_units
# the number_of_TBEs is inclusive of TBEs below
# Connect the TCC controllers to the ruby network
tcc_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcc_cntrl.requestFromTCP.slave = ruby_system.network.master
tcc_cntrl.responseToCore = MessageBuffer(ordered = True)
tcc_cntrl.responseToCore.master = ruby_system.network.slave
tcc_cntrl.probeFromNB = MessageBuffer()
tcc_cntrl.probeFromNB.slave = ruby_system.network.master
tcc_cntrl.responseFromNB = MessageBuffer()
tcc_cntrl.responseFromNB.slave = ruby_system.network.master
tcc_cntrl.requestToNB = MessageBuffer(ordered = True)
tcc_cntrl.requestToNB.master = ruby_system.network.slave
tcc_cntrl.responseToNB = MessageBuffer()
tcc_cntrl.responseToNB.master = ruby_system.network.slave
tcc_cntrl.unblockToNB = MessageBuffer()
tcc_cntrl.unblockToNB.master = ruby_system.network.slave
tcc_cntrl.triggerQueue = MessageBuffer(ordered = True)
exec("ruby_system.tcc_cntrl%d = tcc_cntrl" % i)
# connect all of the wire buffers between L3 and dirs up
# TCC cntrls added to the GPU cluster
gpuCluster.add(tcc_cntrl)
# Assuming no DMA devices
assert(len(dma_devices) == 0)
# Add cpu/gpu clusters to main cluster
mainCluster.add(cpuCluster)
mainCluster.add(gpuCluster)
ruby_system.network.number_of_virtual_networks = 10
return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
|
|
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FormPost Middleware
Translates a browser form post into a regular Swift object PUT.
The format of the form is::
<form action="<swift-url>" method="POST"
enctype="multipart/form-data">
<input type="hidden" name="redirect" value="<redirect-url>" />
<input type="hidden" name="max_file_size" value="<bytes>" />
<input type="hidden" name="max_file_count" value="<count>" />
<input type="hidden" name="expires" value="<unix-timestamp>" />
<input type="hidden" name="signature" value="<hmac>" />
<input type="file" name="file1" /><br />
<input type="submit" />
</form>
The <swift-url> is the URL to the Swift destination, such as::
https://swift-cluster.example.com/v1/AUTH_account/container/object_prefix
The name of each file uploaded will be appended to the <swift-url>
given. So, you can upload directly to the root of container with a
url like::
https://swift-cluster.example.com/v1/AUTH_account/container/
Optionally, you can include an object prefix to better separate
different users' uploads, such as::
https://swift-cluster.example.com/v1/AUTH_account/container/object_prefix
Note the form method must be POST and the enctype must be set as
"multipart/form-data".
The redirect attribute is the URL to redirect the browser to after
the upload completes. The URL will have status and message query
parameters added to it, indicating the HTTP status code for the
upload (2xx is success) and a possible message for further
information if there was an error (such as "max_file_size exceeded").
The max_file_size attribute must be included and indicates the
largest single file upload that can be done, in bytes.
The max_file_count attribute must be included and indicates the
maximum number of files that can be uploaded with the form. Include
additional ``<input type="file" name="filexx" />`` attributes if
desired.
The expires attribute is the Unix timestamp before which the form
must be submitted; after that time the form is invalidated.
The signature attribute is the HMAC-SHA1 signature of the form. Here is
sample code for computing the signature::
import hmac
from hashlib import sha1
from time import time
path = '/v1/account/container/object_prefix'
redirect = 'https://myserver.com/some-page'
max_file_size = 104857600
max_file_count = 10
expires = int(time() + 600)
key = 'mykey'
hmac_body = '%s\\n%s\\n%s\\n%s\\n%s' % (path, redirect,
max_file_size, max_file_count, expires)
signature = hmac.new(key, hmac_body, sha1).hexdigest()
The key is the value of the X-Account-Meta-Temp-URL-Key header on the
account.
Be certain to use the full path, from the /v1/ onward.
The command line tool ``swift-form-signature`` may be used (mostly
just when testing) to compute expires and signature.
Also note that the file attributes must be after the other attributes
in order to be processed correctly. If attributes come after the
file, they won't be sent with the subrequest (there is no way to
parse all the attributes on the server-side without reading the whole
thing into memory -- to service many requests, some with large files,
there just isn't enough memory on the server, so attributes following
the file are simply ignored).
"""
__all__ = ['FormPost', 'filter_factory', 'READ_CHUNK_SIZE', 'MAX_VALUE_LENGTH']
import hmac
import re
import rfc822
from hashlib import sha1
from StringIO import StringIO
from time import gmtime, strftime, time
from urllib import quote, unquote
from swift.common.utils import get_logger, streq_const_time
from swift.common.wsgi import make_pre_authed_env
#: The size of data to read from the form at any given time.
READ_CHUNK_SIZE = 4096
#: The maximum size of any attribute's value. Any additional data will be
#: truncated.
MAX_VALUE_LENGTH = 4096
#: Regular expression to match form attributes.
ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)')
class FormInvalid(Exception):
pass
def _parse_attrs(header):
"""
Given the value of a header like:
Content-Disposition: form-data; name="somefile"; filename="test.html"
Return data like
("form-data", {"name": "somefile", "filename": "test.html"})
:param header: Value of a header (the part after the ': ').
:returns: (value name, dict) of the attribute data parsed (see above).
"""
attributes = {}
attrs = ''
if '; ' in header:
header, attrs = header.split('; ', 1)
m = True
while m:
m = ATTRIBUTES_RE.match(attrs)
if m:
attrs = attrs[len(m.group(0)):]
attributes[m.group(1)] = m.group(2).strip('"')
return header, attributes
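# Illustrative sketch (not part of the original middleware): what _parse_attrs
# returns for a typical Content-Disposition value. The helper below is never
# called; it only documents the expected behaviour.
def _demo_parse_attrs():
    value, attrs = _parse_attrs(
        'form-data; name="somefile"; filename="test.html"')
    assert value == 'form-data'
    assert attrs == {'name': 'somefile', 'filename': 'test.html'}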
class _IterRequestsFileLikeObject(object):
def __init__(self, wsgi_input, boundary, input_buffer):
self.no_more_data_for_this_file = False
self.no_more_files = False
self.wsgi_input = wsgi_input
self.boundary = boundary
self.input_buffer = input_buffer
def read(self, length=None):
if not length:
length = READ_CHUNK_SIZE
if self.no_more_data_for_this_file:
return ''
# read enough data to know whether we're going to run
# into a boundary in next [length] bytes
if len(self.input_buffer) < length + len(self.boundary) + 2:
to_read = length + len(self.boundary) + 2
while to_read > 0:
chunk = self.wsgi_input.read(to_read)
to_read -= len(chunk)
self.input_buffer += chunk
if not chunk:
self.no_more_files = True
break
boundary_pos = self.input_buffer.find(self.boundary)
# boundary does not exist in the next (length) bytes
if boundary_pos == -1 or boundary_pos > length:
ret = self.input_buffer[:length]
self.input_buffer = self.input_buffer[length:]
# if it does, just return data up to the boundary
else:
ret, self.input_buffer = self.input_buffer.split(self.boundary, 1)
self.no_more_files = self.input_buffer.startswith('--')
self.no_more_data_for_this_file = True
self.input_buffer = self.input_buffer[2:]
return ret
def readline(self):
if self.no_more_data_for_this_file:
return ''
boundary_pos = newline_pos = -1
while newline_pos < 0 and boundary_pos < 0:
chunk = self.wsgi_input.read(READ_CHUNK_SIZE)
self.input_buffer += chunk
newline_pos = self.input_buffer.find('\r\n')
boundary_pos = self.input_buffer.find(self.boundary)
if not chunk:
self.no_more_files = True
break
# found a newline
if newline_pos >= 0 and \
(boundary_pos < 0 or newline_pos < boundary_pos):
# Use self.read to ensure any logic there happens...
ret = ''
to_read = newline_pos + 2
while to_read > 0:
chunk = self.read(to_read)
# Should never happen since we're reading from input_buffer,
# but just for completeness...
if not chunk:
break
to_read -= len(chunk)
ret += chunk
return ret
else: # no newlines, just return up to next boundary
return self.read(len(self.input_buffer))
def _iter_requests(wsgi_input, boundary):
"""
Given a multi-part mime encoded input file object and boundary,
yield file-like objects for each part.
:param wsgi_input: The file-like object to read from.
:param boundary: The mime boundary to separate new file-like
objects on.
:returns: A generator of file-like objects for each part.
"""
boundary = '--' + boundary
if wsgi_input.readline().strip() != boundary:
raise FormInvalid('invalid starting boundary')
boundary = '\r\n' + boundary
input_buffer = ''
done = False
while not done:
it = _IterRequestsFileLikeObject(wsgi_input, boundary, input_buffer)
yield it
done = it.no_more_files
input_buffer = it.input_buffer
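# Illustrative sketch of the multipart/form-data layout that _iter_requests
# walks (the boundary value and field names here are hypothetical):
#
#   --BOUNDARY\r\n
#   Content-Disposition: form-data; name="max_file_size"\r\n
#   \r\n
#   104857600\r\n
#   --BOUNDARY\r\n
#   Content-Disposition: form-data; name="file1"; filename="a.txt"\r\n
#   Content-Type: text/plain\r\n
#   \r\n
#   <file bytes>\r\n
#   --BOUNDARY--\r\n
#
# Each yielded file-like object covers one part (its headers and body) and
# stops reading at the next "\r\n--BOUNDARY" marker; "--" after the final
# boundary signals that no more parts follow.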
class _CappedFileLikeObject(object):
"""
A file-like object wrapping another file-like object that raises
an EOFError if the amount of data read exceeds a given
max_file_size.
:param fp: The file-like object to wrap.
:param max_file_size: The maximum bytes to read before raising an
EOFError.
"""
def __init__(self, fp, max_file_size):
self.fp = fp
self.max_file_size = max_file_size
self.amount_read = 0
def read(self, size=None):
ret = self.fp.read(size)
self.amount_read += len(ret)
if self.amount_read > self.max_file_size:
raise EOFError('max_file_size exceeded')
return ret
def readline(self):
ret = self.fp.readline()
self.amount_read += len(ret)
if self.amount_read > self.max_file_size:
raise EOFError('max_file_size exceeded')
return ret
class FormPost(object):
"""
FormPost Middleware
See above for a full description.
:param app: The next WSGI filter or app in the paste.deploy
chain.
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf):
#: The next WSGI application/filter in the paste.deploy pipeline.
self.app = app
#: The filter configuration dict.
self.conf = conf
#: The logger to use with this middleware.
self.logger = get_logger(conf, log_route='formpost')
#: The HTTP user agent to use with subrequests.
self.agent = '%(orig)s FormPost'
def __call__(self, env, start_response):
"""
Main hook into the WSGI paste.deploy filter/app pipeline.
:param env: The WSGI environment dict.
:param start_response: The WSGI start_response hook.
:returns: Response as per WSGI.
"""
if env['REQUEST_METHOD'] == 'POST':
try:
content_type, attrs = \
_parse_attrs(env.get('CONTENT_TYPE') or '')
if content_type == 'multipart/form-data' and \
'boundary' in attrs:
resp_status = [0]
def _start_response(status, headers, exc_info=None):
resp_status[0] = int(status.split(' ', 1)[0])
start_response(status, headers, exc_info)
self._log_request(env, resp_status)
                    return self._translate_form(env, _start_response,
                                                attrs['boundary'])
except (FormInvalid, EOFError), err:
self._log_request(env, 400)
body = 'FormPost: %s' % err
start_response('400 Bad Request',
(('Content-Type', 'text/plain'),
('Content-Length', str(len(body)))))
return [body]
return self.app(env, start_response)
def _translate_form(self, env, start_response, boundary):
"""
Translates the form data into subrequests and issues a
response.
:param env: The WSGI environment dict.
:param start_response: The WSGI start_response hook.
:returns: Response as per WSGI.
"""
key = self._get_key(env)
status = message = ''
attributes = {}
file_count = 0
for fp in _iter_requests(env['wsgi.input'], boundary):
hdrs = rfc822.Message(fp, 0)
disp, attrs = \
_parse_attrs(hdrs.getheader('Content-Disposition', ''))
if disp == 'form-data' and attrs.get('filename'):
file_count += 1
try:
if file_count > int(attributes.get('max_file_count') or 0):
status = '400 Bad Request'
message = 'max file count exceeded'
break
except ValueError:
raise FormInvalid('max_file_count not an integer')
attributes['filename'] = attrs['filename'] or 'filename'
if 'content-type' not in attributes and 'content-type' in hdrs:
attributes['content-type'] = \
hdrs['Content-Type'] or 'application/octet-stream'
status, message = self._perform_subrequest(env, start_response,
attributes, fp, key)
if status[:1] != '2':
break
else:
data = ''
mxln = MAX_VALUE_LENGTH
while mxln:
chunk = fp.read(mxln)
if not chunk:
break
mxln -= len(chunk)
data += chunk
while fp.read(READ_CHUNK_SIZE):
pass
if 'name' in attrs:
attributes[attrs['name'].lower()] = data.rstrip('\r\n--')
if not status:
status = '400 Bad Request'
message = 'no files to process'
if not attributes.get('redirect'):
body = status
if message:
body = status + '\r\nFormPost: ' + message.title()
start_response(status, [('Content-Type', 'text/plain'),
                                    ('Content-Length', str(len(body)))])
return [body]
status = status.split(' ', 1)[0]
body = '<html><body><p><a href="%s?status=%s&message=%s">Click to ' \
'continue...</a></p></body></html>' % \
(attributes['redirect'], quote(status), quote(message))
start_response('303 See Other',
[('Location', '%s?status=%s&message=%s' %
(attributes['redirect'], quote(status), quote(message))),
('Content-Length', str(len(body)))])
return [body]
def _perform_subrequest(self, env, start_response, attributes, fp, key):
"""
Performs the subrequest and returns a new response.
:param env: The WSGI environment dict.
:param start_response: The WSGI start_response hook.
:param attributes: dict of the attributes of the form so far.
:param fp: The file-like object containing the request body.
:param key: The account key to validate the signature with.
:returns: Response as per WSGI.
"""
if not key:
return '401 Unauthorized', 'invalid signature'
try:
max_file_size = int(attributes.get('max_file_size') or 0)
except ValueError:
raise FormInvalid('max_file_size not an integer')
subenv = make_pre_authed_env(env, 'PUT', agent=self.agent)
subenv['HTTP_TRANSFER_ENCODING'] = 'chunked'
subenv['wsgi.input'] = _CappedFileLikeObject(fp, max_file_size)
if subenv['PATH_INFO'][-1] != '/' and \
subenv['PATH_INFO'].count('/') < 4:
subenv['PATH_INFO'] += '/'
subenv['PATH_INFO'] += attributes['filename'] or 'filename'
if 'content-type' in attributes:
subenv['CONTENT_TYPE'] = \
attributes['content-type'] or 'application/octet-stream'
elif 'CONTENT_TYPE' in subenv:
del subenv['CONTENT_TYPE']
try:
if int(attributes.get('expires') or 0) < time():
return '401 Unauthorized', 'form expired'
except ValueError:
            raise FormInvalid('expires not an integer')
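        # Note: this computation mirrors the client-side sample in the module
        # docstring -- path, redirect, max_file_size, max_file_count and
        # expires joined by newlines and HMAC-SHA1 signed with the account's
        # X-Account-Meta-Temp-URL-Key.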
hmac_body = '%s\n%s\n%s\n%s\n%s' % (
env['PATH_INFO'],
attributes.get('redirect') or '',
attributes.get('max_file_size') or '0',
attributes.get('max_file_count') or '0',
attributes.get('expires') or '0'
)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
if not streq_const_time(sig, (attributes.get('signature') or
'invalid')):
return '401 Unauthorized', 'invalid signature'
substatus = [None]
def _start_response(status, headers, exc_info=None):
substatus[0] = status
i = iter(self.app(subenv, _start_response))
try:
i.next()
except StopIteration:
pass
return substatus[0], ''
def _get_key(self, env):
"""
Returns the X-Account-Meta-Temp-URL-Key header value for the
account, or None if none is set.
:param env: The WSGI environment for the request.
:returns: X-Account-Meta-Temp-URL-Key str value, or None.
"""
parts = env['PATH_INFO'].split('/', 4)
if len(parts) < 4 or parts[0] or parts[1] != 'v1' or not parts[2] or \
not parts[3]:
return None
account = parts[2]
key = None
memcache = env.get('swift.cache')
if memcache:
key = memcache.get('temp-url-key/%s' % account)
if not key:
newenv = make_pre_authed_env(env, 'HEAD', '/v1/' + account,
self.agent)
newenv['CONTENT_LENGTH'] = '0'
newenv['wsgi.input'] = StringIO('')
key = [None]
def _start_response(status, response_headers, exc_info=None):
for h, v in response_headers:
if h.lower() == 'x-account-meta-temp-url-key':
key[0] = v
i = iter(self.app(newenv, _start_response))
try:
i.next()
except StopIteration:
pass
key = key[0]
if key and memcache:
memcache.set('temp-url-key/%s' % account, key, timeout=60)
return key
def _log_request(self, env, response_status_int):
"""
Used when a request might not be logged by the underlying
WSGI application, but we'd still like to record what
happened. An early 401 Unauthorized is a good example of
this.
:param env: The WSGI environment for the request.
:param response_status_int: The HTTP status we'll be replying
to the request with.
"""
the_request = quote(unquote(env.get('PATH_INFO') or '/'))
if env.get('QUERY_STRING'):
the_request = the_request + '?' + env['QUERY_STRING']
client = env.get('HTTP_X_CLUSTER_CLIENT_IP')
if not client and 'HTTP_X_FORWARDED_FOR' in env:
# remote host for other lbs
client = env['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
if not client:
client = env.get('REMOTE_ADDR')
self.logger.info(' '.join(quote(str(x)) for x in (
client or '-',
env.get('REMOTE_ADDR') or '-',
strftime('%d/%b/%Y/%H/%M/%S', gmtime()),
env.get('REQUEST_METHOD') or 'GET',
the_request,
env.get('SERVER_PROTOCOL') or '1.0',
response_status_int,
env.get('HTTP_REFERER') or '-',
(env.get('HTTP_USER_AGENT') or '-') + ' FormPOST',
env.get('HTTP_X_AUTH_TOKEN') or '-',
'-',
'-',
'-',
env.get('swift.trans_id') or '-',
'-',
'-',
)))
def filter_factory(global_conf, **local_conf):
""" Returns the WSGI filter for use with paste.deploy. """
conf = global_conf.copy()
conf.update(local_conf)
return lambda app: FormPost(app, conf)
|
|
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import nullid, nullrev, hex, bin
from i18n import _
from mercurial import obsolete
import error, util, filemerge, copies, subrepo, worker, dicthelpers
import errno, os, shutil
class mergestate(object):
'''track 3-way merge state of individual files'''
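    # On-disk format (as read and written by _read, commit and add below):
    # .hg/merge/state holds the hex node of the local parent on its first
    # line, followed by one NUL-separated record per file:
    #   dfile, state ('u' unresolved / 'r' resolved), backup hash,
    #   local path, ancestor path, ancestor filenode hex, other path, flags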
def __init__(self, repo):
self._repo = repo
self._dirty = False
self._read()
def reset(self, node=None):
self._state = {}
if node:
self._local = node
shutil.rmtree(self._repo.join("merge"), True)
self._dirty = False
def _read(self):
self._state = {}
try:
f = self._repo.opener("merge/state")
for i, l in enumerate(f):
if i == 0:
self._local = bin(l[:-1])
else:
bits = l[:-1].split("\0")
self._state[bits[0]] = bits[1:]
f.close()
except IOError, err:
if err.errno != errno.ENOENT:
raise
self._dirty = False
def commit(self):
if self._dirty:
f = self._repo.opener("merge/state", "w")
f.write(hex(self._local) + "\n")
for d, v in self._state.iteritems():
f.write("\0".join([d] + v) + "\n")
f.close()
self._dirty = False
def add(self, fcl, fco, fca, fd):
hash = util.sha1(fcl.path()).hexdigest()
self._repo.opener.write("merge/" + hash, fcl.data())
self._state[fd] = ['u', hash, fcl.path(), fca.path(),
hex(fca.filenode()), fco.path(), fcl.flags()]
self._dirty = True
def __contains__(self, dfile):
return dfile in self._state
def __getitem__(self, dfile):
return self._state[dfile][0]
def __iter__(self):
l = self._state.keys()
l.sort()
for f in l:
yield f
def mark(self, dfile, state):
self._state[dfile][0] = state
self._dirty = True
def resolve(self, dfile, wctx, octx):
if self[dfile] == 'r':
return 0
state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
fcd = wctx[dfile]
fco = octx[ofile]
fca = self._repo.filectx(afile, fileid=anode)
# "premerge" x flags
flo = fco.flags()
fla = fca.flags()
if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
if fca.node() == nullid:
self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
afile)
elif flags == fla:
flags = flo
# restore local
f = self._repo.opener("merge/" + hash)
self._repo.wwrite(dfile, f.read(), flags)
f.close()
r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
if r is None:
# no real conflict
del self._state[dfile]
elif not r:
self.mark(dfile, 'r')
return r
def _checkunknownfile(repo, wctx, mctx, f):
return (not repo.dirstate._ignore(f)
and os.path.isfile(repo.wjoin(f))
and repo.dirstate.normalize(f) not in repo.dirstate
and mctx[f].cmp(wctx[f]))
def _checkunknown(repo, wctx, mctx):
"check for collisions between unknown files and files in mctx"
error = False
for f in mctx:
if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
error = True
wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
if error:
raise util.Abort(_("untracked files in working directory differ "
"from files in requested revision"))
def _forgetremoved(wctx, mctx, branchmerge):
"""
Forget removed files
If we're jumping between revisions (as opposed to merging), and if
neither the working directory nor the target rev has the file,
then we need to remove it from the dirstate, to prevent the
dirstate from listing the file when it is no longer in the
manifest.
If we're merging, and the other revision has removed a file
that is not present in the working directory, we need to mark it
as removed.
"""
actions = []
state = branchmerge and 'r' or 'f'
for f in wctx.deleted():
if f not in mctx:
actions.append((f, state, None, "forget deleted"))
if not branchmerge:
for f in wctx.removed():
if f not in mctx:
actions.append((f, "f", None, "forget removed"))
return actions
def _checkcollision(repo, wmf, actions, prompts):
# build provisional merged manifest up
pmmf = set(wmf)
def addop(f, args):
pmmf.add(f)
def removeop(f, args):
pmmf.discard(f)
def nop(f, args):
pass
def renameop(f, args):
f2, fd, flags = args
if f:
pmmf.discard(f)
pmmf.add(fd)
def mergeop(f, args):
f2, fd, move = args
if move:
pmmf.discard(f)
pmmf.add(fd)
opmap = {
"a": addop,
"d": renameop,
"dr": nop,
"e": nop,
"f": addop, # untracked file should be kept in working directory
"g": addop,
"m": mergeop,
"r": removeop,
"rd": nop,
}
for f, m, args, msg in actions:
op = opmap.get(m)
assert op, m
op(f, args)
opmap = {
"cd": addop,
"dc": addop,
}
for f, m in prompts:
op = opmap.get(m)
assert op, m
op(f, None)
# check case-folding collision in provisional merged manifest
foldmap = {}
for f in sorted(pmmf):
fold = util.normcase(f)
if fold in foldmap:
raise util.Abort(_("case-folding collision between %s and %s")
% (f, foldmap[fold]))
foldmap[fold] = f
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
acceptremote=False):
"""
Merge p1 and p2 with ancestor pa and generate merge action list
branchmerge and force are as passed in to update
partial = function to filter file lists
acceptremote = accept the incoming changes without prompting
"""
overwrite = force and not branchmerge
actions, copy, movewithdir = [], {}, {}
followcopies = False
if overwrite:
pa = wctx
elif pa == p2: # backwards
pa = wctx.p1()
elif not branchmerge and not wctx.dirty(missing=True):
pass
elif pa and repo.ui.configbool("merge", "followcopies", True):
followcopies = True
# manifests fetched in order are going to be faster, so prime the caches
[x.manifest() for x in
sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
if followcopies:
ret = copies.mergecopies(repo, wctx, p2, pa)
copy, movewithdir, diverge, renamedelete = ret
for of, fl in diverge.iteritems():
actions.append((of, "dr", (fl,), "divergent renames"))
for of, fl in renamedelete.iteritems():
actions.append((of, "rd", (fl,), "rename and delete"))
repo.ui.note(_("resolving manifests\n"))
repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
% (bool(branchmerge), bool(force), bool(partial)))
repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
copied = set(copy.values())
copied.update(movewithdir.values())
if '.hgsubstate' in m1:
# check whether sub state is modified
for s in sorted(wctx.substate):
if wctx.sub(s).dirty():
m1['.hgsubstate'] += "+"
break
aborts, prompts = [], []
# Compare manifests
fdiff = dicthelpers.diff(m1, m2)
flagsdiff = m1.flagsdiff(m2)
diff12 = dicthelpers.join(fdiff, flagsdiff)
for f, (n12, fl12) in diff12.iteritems():
if n12:
n1, n2 = n12
else: # file contents didn't change, but flags did
n1 = n2 = m1.get(f, None)
if n1 is None:
# Since n1 == n2, the file isn't present in m2 either. This
# means that the file was removed or deleted locally and
# removed remotely, but that residual entries remain in flags.
# This can happen in manifests generated by workingctx.
continue
if fl12:
fl1, fl2 = fl12
else: # flags didn't change, file contents did
fl1 = fl2 = m1.flags(f)
if partial and not partial(f):
continue
if n1 and n2:
fla = ma.flags(f)
nol = 'l' not in fl1 + fl2 + fla
a = ma.get(f, nullid)
if n2 == a and fl2 == fla:
pass # remote unchanged - keep local
elif n1 == a and fl1 == fla: # local unchanged - use remote
if n1 == n2: # optimization: keep local content
actions.append((f, "e", (fl2,), "update permissions"))
else:
actions.append((f, "g", (fl2,), "remote is newer"))
elif nol and n2 == a: # remote only changed 'x'
actions.append((f, "e", (fl2,), "update permissions"))
elif nol and n1 == a: # local only changed 'x'
actions.append((f, "g", (fl1,), "remote is newer"))
else: # both changed something
actions.append((f, "m", (f, f, False), "versions differ"))
elif f in copied: # files we'll deal with on m2 side
pass
elif n1 and f in movewithdir: # directory rename
f2 = movewithdir[f]
actions.append((f, "d", (None, f2, fl1),
"remote renamed directory to " + f2))
elif n1 and f in copy:
f2 = copy[f]
actions.append((f, "m", (f2, f, False),
"local copied/moved to " + f2))
elif n1 and f in ma: # clean, a different, no remote
if n1 != ma[f]:
prompts.append((f, "cd")) # prompt changed/deleted
elif n1[20:] == "a": # added, no remote
actions.append((f, "f", None, "remote deleted"))
else:
actions.append((f, "r", None, "other deleted"))
elif n2 and f in movewithdir:
f2 = movewithdir[f]
actions.append((None, "d", (f, f2, fl2),
"local renamed directory to " + f2))
elif n2 and f in copy:
f2 = copy[f]
if f2 in m2:
actions.append((f2, "m", (f, f, False),
"remote copied to " + f))
else:
actions.append((f2, "m", (f, f, True),
"remote moved to " + f))
elif n2 and f not in ma:
# local unknown, remote created: the logic is described by the
# following table:
#
# force branchmerge different | action
# n * n | get
# n * y | abort
# y n * | get
# y y n | get
# y y y | merge
#
# Checking whether the files are different is expensive, so we
# don't do that when we can avoid it.
if force and not branchmerge:
actions.append((f, "g", (fl2,), "remote created"))
else:
different = _checkunknownfile(repo, wctx, p2, f)
if force and branchmerge and different:
actions.append((f, "m", (f, f, False),
"remote differs from untracked local"))
elif not force and different:
aborts.append((f, "ud"))
else:
actions.append((f, "g", (fl2,), "remote created"))
elif n2 and n2 != ma[f]:
prompts.append((f, "dc")) # prompt deleted/changed
for f, m in sorted(aborts):
if m == "ud":
repo.ui.warn(_("%s: untracked file differs\n") % f)
else: assert False, m
if aborts:
raise util.Abort(_("untracked files in working directory differ "
"from files in requested revision"))
if not util.checkcase(repo.path):
# check collision between files only in p2 for clean update
if (not branchmerge and
(force or not wctx.dirty(missing=True, branch=False))):
_checkcollision(repo, m2, [], [])
else:
_checkcollision(repo, m1, actions, prompts)
for f, m in sorted(prompts):
if m == "cd":
if acceptremote:
actions.append((f, "r", None, "remote delete"))
elif repo.ui.promptchoice(
_("local changed %s which remote deleted\n"
"use (c)hanged version or (d)elete?") % f,
(_("&Changed"), _("&Delete")), 0):
actions.append((f, "r", None, "prompt delete"))
else:
actions.append((f, "a", None, "prompt keep"))
elif m == "dc":
if acceptremote:
actions.append((f, "g", (m2.flags(f),), "remote recreating"))
elif repo.ui.promptchoice(
_("remote changed %s which local deleted\n"
"use (c)hanged version or leave (d)eleted?") % f,
(_("&Changed"), _("&Deleted")), 0) == 0:
actions.append((f, "g", (m2.flags(f),), "prompt recreating"))
else: assert False, m
return actions
def actionkey(a):
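    # sort removals ('r' actions) ahead of everything else so that paths are
    # cleared before other actions may write files in their place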
return a[1] == "r" and -1 or 0, a
def getremove(repo, mctx, overwrite, args):
"""apply usually-non-interactive updates to the working directory
mctx is the context to be merged into the working copy
yields tuples for progress updates
"""
verbose = repo.ui.verbose
unlink = util.unlinkpath
wjoin = repo.wjoin
fctx = mctx.filectx
wwrite = repo.wwrite
audit = repo.wopener.audit
i = 0
for arg in args:
f = arg[0]
if arg[1] == 'r':
if verbose:
repo.ui.note(_("removing %s\n") % f)
audit(f)
try:
unlink(wjoin(f), ignoremissing=True)
except OSError, inst:
repo.ui.warn(_("update failed to remove %s: %s!\n") %
(f, inst.strerror))
else:
if verbose:
repo.ui.note(_("getting %s\n") % f)
wwrite(f, fctx(f).data(), arg[2][0])
if i == 100:
yield i, f
i = 0
i += 1
if i > 0:
yield i, f
def applyupdates(repo, actions, wctx, mctx, actx, overwrite):
"""apply the merge action list to the working directory
wctx is the working copy context
mctx is the context to be merged into the working copy
actx is the context of the common ancestor
Return a tuple of counts (updated, merged, removed, unresolved) that
describes how many files were affected by the update.
"""
updated, merged, removed, unresolved = 0, 0, 0, 0
ms = mergestate(repo)
ms.reset(wctx.p1().node())
moves = []
actions.sort(key=actionkey)
# prescan for merges
for a in actions:
f, m, args, msg = a
repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
if m == "m": # merge
f2, fd, move = args
if fd == '.hgsubstate': # merged internally
continue
repo.ui.debug(" preserving %s for resolve of %s\n" % (f, fd))
fcl = wctx[f]
fco = mctx[f2]
if mctx == actx: # backwards, use working dir parent as ancestor
if fcl.parents():
fca = fcl.p1()
else:
fca = repo.filectx(f, fileid=nullrev)
else:
fca = fcl.ancestor(fco, actx)
if not fca:
fca = repo.filectx(f, fileid=nullrev)
ms.add(fcl, fco, fca, fd)
if f != fd and move:
moves.append(f)
audit = repo.wopener.audit
# remove renamed files after safely stored
for f in moves:
if os.path.lexists(repo.wjoin(f)):
repo.ui.debug("removing %s\n" % f)
audit(f)
util.unlinkpath(repo.wjoin(f))
numupdates = len(actions)
workeractions = [a for a in actions if a[1] in 'gr']
updateactions = [a for a in workeractions if a[1] == 'g']
updated = len(updateactions)
removeactions = [a for a in workeractions if a[1] == 'r']
removed = len(removeactions)
actions = [a for a in actions if a[1] not in 'gr']
hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
if hgsub and hgsub[0] == 'r':
subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
z = 0
prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
removeactions)
for i, item in prog:
z += i
repo.ui.progress(_('updating'), z, item=item, total=numupdates,
unit=_('files'))
prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
updateactions)
for i, item in prog:
z += i
repo.ui.progress(_('updating'), z, item=item, total=numupdates,
unit=_('files'))
if hgsub and hgsub[0] == 'g':
subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
_updating = _('updating')
_files = _('files')
progress = repo.ui.progress
for i, a in enumerate(actions):
f, m, args, msg = a
progress(_updating, z + i + 1, item=f, total=numupdates, unit=_files)
if m == "m": # merge
f2, fd, move = args
if fd == '.hgsubstate': # subrepo states need updating
subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
overwrite)
continue
audit(fd)
r = ms.resolve(fd, wctx, mctx)
if r is not None and r > 0:
unresolved += 1
else:
if r is None:
updated += 1
else:
merged += 1
elif m == "d": # directory rename
f2, fd, flags = args
if f:
repo.ui.note(_("moving %s to %s\n") % (f, fd))
audit(f)
repo.wwrite(fd, wctx.filectx(f).data(), flags)
util.unlinkpath(repo.wjoin(f))
if f2:
repo.ui.note(_("getting %s to %s\n") % (f2, fd))
repo.wwrite(fd, mctx.filectx(f2).data(), flags)
updated += 1
elif m == "dr": # divergent renames
fl, = args
repo.ui.warn(_("note: possible conflict - %s was renamed "
"multiple times to:\n") % f)
for nf in fl:
repo.ui.warn(" %s\n" % nf)
elif m == "rd": # rename and delete
fl, = args
repo.ui.warn(_("note: possible conflict - %s was deleted "
"and renamed to:\n") % f)
for nf in fl:
repo.ui.warn(" %s\n" % nf)
elif m == "e": # exec
flags, = args
audit(f)
util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
updated += 1
ms.commit()
progress(_updating, None, total=numupdates, unit=_files)
return updated, merged, removed, unresolved
def calculateupdates(repo, tctx, mctx, ancestor, branchmerge, force, partial,
acceptremote=False):
"Calculate the actions needed to merge mctx into tctx"
actions = []
actions += manifestmerge(repo, tctx, mctx,
ancestor,
branchmerge, force,
partial, acceptremote)
if tctx.rev() is None:
actions += _forgetremoved(tctx, mctx, branchmerge)
return actions
def recordupdates(repo, actions, branchmerge):
"record merge actions to the dirstate"
for a in actions:
f, m, args, msg = a
if m == "r": # remove
if branchmerge:
repo.dirstate.remove(f)
else:
repo.dirstate.drop(f)
elif m == "a": # re-add
if not branchmerge:
repo.dirstate.add(f)
elif m == "f": # forget
repo.dirstate.drop(f)
elif m == "e": # exec change
repo.dirstate.normallookup(f)
elif m == "g": # get
if branchmerge:
repo.dirstate.otherparent(f)
else:
repo.dirstate.normal(f)
elif m == "m": # merge
f2, fd, move = args
if branchmerge:
# We've done a branch merge, mark this file as merged
# so that we properly record the merger later
repo.dirstate.merge(fd)
if f != f2: # copy/rename
if move:
repo.dirstate.remove(f)
if f != fd:
repo.dirstate.copy(f, fd)
else:
repo.dirstate.copy(f2, fd)
else:
# We've update-merged a locally modified file, so
# we set the dirstate to emulate a normal checkout
# of that file some time in the past. Thus our
# merge will appear as a normal local file
# modification.
if f2 == fd: # file not locally copied/moved
repo.dirstate.normallookup(fd)
if move:
repo.dirstate.drop(f)
elif m == "d": # directory rename
f2, fd, flag = args
if not f2 and f not in repo.dirstate:
# untracked file moved
continue
if branchmerge:
repo.dirstate.add(fd)
if f:
repo.dirstate.remove(f)
repo.dirstate.copy(f, fd)
if f2:
repo.dirstate.copy(f2, fd)
else:
repo.dirstate.normal(fd)
if f:
repo.dirstate.drop(f)
def update(repo, node, branchmerge, force, partial, ancestor=None,
mergeancestor=False):
"""
Perform a merge between the working directory and the given node
node = the node to update to, or None if unspecified
branchmerge = whether to merge between branches
force = whether to force branch merging or file overwriting
partial = a function to filter file lists (dirstate not updated)
mergeancestor = whether it is merging with an ancestor. If true,
we should accept the incoming changes for any prompts that occur.
If false, merging with an ancestor (fast-forward) is only allowed
between different named branches. This flag is used by rebase extension
as a temporary fix and should be avoided in general.
The table below shows all the behaviors of the update command
given the -c and -C or no options, whether the working directory
is dirty, whether a revision is specified, and the relationship of
the parent rev to the target rev (linear, on the same named
branch, or on another named branch).
This logic is tested by test-update-branches.t.
-c -C dirty rev | linear same cross
n n n n | ok (1) x
n n n y | ok ok ok
n n y * | merge (2) (2)
n y * * | --- discard ---
y n y * | --- (3) ---
y n n * | --- ok ---
y y * * | --- (4) ---
x = can't happen
* = don't-care
1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
2 = abort: crosses branches (use 'hg merge' to merge or
use 'hg update -C' to discard changes)
3 = abort: uncommitted local changes
4 = incompatible options (checked in commands.py)
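    As a rough guide (not an exhaustive mapping): a plain "hg update REV"
    corresponds to branchmerge=False, force=False; "hg update -C REV" to
    branchmerge=False, force=True; and "hg merge REV" to branchmerge=True.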
Return the same tuple as applyupdates().
"""
onode = node
wlock = repo.wlock()
try:
wc = repo[None]
if node is None:
# tip of current branch
try:
node = repo.branchtip(wc.branch())
except error.RepoLookupError:
if wc.branch() == "default": # no default branch!
node = repo.lookup("tip") # update to tip
else:
raise util.Abort(_("branch %s not found") % wc.branch())
overwrite = force and not branchmerge
pl = wc.parents()
p1, p2 = pl[0], repo[node]
if ancestor:
pa = repo[ancestor]
else:
pa = p1.ancestor(p2)
fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
### check phase
if not overwrite and len(pl) > 1:
raise util.Abort(_("outstanding uncommitted merges"))
if branchmerge:
if pa == p2:
raise util.Abort(_("merging with a working directory ancestor"
" has no effect"))
elif pa == p1:
if not mergeancestor and p1.branch() == p2.branch():
raise util.Abort(_("nothing to merge"),
hint=_("use 'hg update' "
"or check 'hg heads'"))
if not force and (wc.files() or wc.deleted()):
raise util.Abort(_("outstanding uncommitted changes"),
hint=_("use 'hg status' to list changes"))
for s in sorted(wc.substate):
if wc.sub(s).dirty():
raise util.Abort(_("outstanding uncommitted changes in "
"subrepository '%s'") % s)
elif not overwrite:
if pa not in (p1, p2): # nolinear
dirty = wc.dirty(missing=True)
if dirty or onode is None:
                    # The branching here is a bit convoluted to ensure we
                    # make the minimal number of calls to obsolete.foreground.
foreground = obsolete.foreground(repo, [p1.node()])
# note: the <node> variable contains a random identifier
if repo[node].node() in foreground:
pa = p1 # allow updating to successors
elif dirty:
msg = _("crosses branches (merge branches or use"
" --clean to discard changes)")
raise util.Abort(msg)
else: # node is none
msg = _("crosses branches (merge branches or update"
" --check to force update)")
raise util.Abort(msg)
else:
# Allow jumping branches if clean and specific rev given
pa = p1
### calculate phase
actions = calculateupdates(repo, wc, p2, pa,
branchmerge, force, partial, mergeancestor)
### apply phase
if not branchmerge: # just jump to the new rev
fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
if not partial:
repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
stats = applyupdates(repo, actions, wc, p2, pa, overwrite)
if not partial:
repo.setparents(fp1, fp2)
recordupdates(repo, actions, branchmerge)
if not branchmerge:
repo.dirstate.setbranch(p2.branch())
finally:
wlock.release()
if not partial:
repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
return stats
|
|
#!/usr/bin/env python
"""
Train an RST parsing model.
This script takes a JSON-formatted training set created by
``convert_rst_discourse_tb.py``, trains a model, and
saves the model in a user-specified location.
:author: Michael Heilman
:author: Nitin Madnani
:organization: ETS
"""
import argparse
import json
import logging
import os
from collections import Counter
from concurrent.futures import ProcessPoolExecutor
from configparser import ConfigParser
from functools import partial
from os.path import abspath, exists, join
import numpy as np
from nltk.tree import ParentedTree
from skll.experiments import run_configuration
from skll.learner import Learner
from .collapse_rst_labels import collapse_rst_labels
from .discourse_parsing import Parser
from .extract_actions_from_trees import extract_parse_actions
from .rst_eval import predict_and_evaluate_rst_trees
def train_rst_parsing_model(working_path, model_path, C):
"""
    Train an RST parsing model on pre-extracted features and save to disk.
This function trains a logistic regression RST parsing model on features
that are assumed to have already been extracted to a file called
``rst_parsing.jsonlines`` under ``working_path``. The ``C``
hyperparameter of the model is set to the value provided as input
and the model is saved under ``model_path`` with the name
``rst_parsing_all_feats_LogisticRegression.model``.
Parameters
----------
working_path : str
Path to the directory where the pre-extracted SKLL training features
are stored.
model_path : str
Path to the directory where the trained model will be saved.
C : float
        The value of the ``C`` hyperparameter to be used when training
        the model.
"""
# create a sub-directory under ``working_path`` to store the logs
working_subdir = join(working_path, f"C{C}")
os.makedirs(working_subdir, exist_ok=False)
# create ``model_path`` unless it already exists
os.makedirs(model_path, exist_ok=True)
# set up the learner name and settings for use in the SKLL configuration
learner_name = 'LogisticRegression'
fixed_parameters = [{"random_state": 123456789,
"penalty": 'l1',
'C': C}]
# create the SKLL config dictionary
cfg_dict = {"General": {"task": "train",
"experiment_name": "rst_parsing"},
"Input": {"train_directory": working_path,
"ids_to_floats": "False",
"featuresets": json.dumps([["rst_parsing"]]),
"featureset_names": json.dumps(["all_feats"]),
"suffix": '.jsonlines',
"fixed_parameters": json.dumps(fixed_parameters),
"learners": json.dumps([learner_name])},
"Tuning": {"feature_scaling": "none",
"grid_search": "False",
"min_feature_count": "1"},
"Output": {"probability": "True",
"models": model_path,
"log": working_subdir}
}
# save the configuration file to disk
cfg_path = join(working_subdir, "rst_parsing.cfg")
cfg = ConfigParser()
for section_name, section_dict in list(cfg_dict.items()):
cfg.add_section(section_name)
for key, val in section_dict.items():
cfg.set(section_name, key, val)
assert not exists(cfg_path)
with open(cfg_path, 'w') as config_file:
cfg.write(config_file)
# run SKLL to train the model
run_configuration(cfg_path)
# make the trained model smaller/faster by removing features
# that get zero weights due to the L1 regularization
prune_model(model_path, "rst_parsing_all_feats_LogisticRegression.model")
def prune_model(model_path, model_name):
"""
Prune zero-weighted features from the given logistic regression model.
This function makes the given model smaller by removing information
about features that get weights of 0 due to L1-regularization. The
model file is assumed to have the name ``model_name`` and be located
under ``model_path``.
**IMPORTANT**: Note that the input model file is overwritten with
the pruned model.
Parameters
----------
model_path : str
Path to the directory that contains the model file.
model_name : str
The name of the model file.
"""
# load the Learner instance from the model file
model = Learner.from_file(join(model_path, model_name))
# remove coefficients for features that are 0 for all classes
nonzero_feat_mask = ~np.all(model.model.coef_ == 0, axis=0)
model.model.coef_ = model.model.coef_[:, nonzero_feat_mask]
# remove the extra words from the feature vectorizer
model.feat_vectorizer.restrict(nonzero_feat_mask)
# refit the feature selector to expect the correctly-sized matrices
model.feat_selector.fit(np.ones((1, model.model.coef_.shape[1])))
# make the vectorizer return dense matrices since that is a bit faster
model.feat_vectorizer.set_params(sparse=False)
# delete the raw_coef_ attribute that sklearn *only* uses when training
model.model.raw_coef_ = None
# save the pruned model to the same file
model.save(join(model_path, model_name))
def train_and_evaluate_model(working_path, model_path, eval_data, C_value):
"""
Train and evaluate given RST parsing model.
Parameters
----------
working_path : str
Path to the directory where the pre-extracted SKLL training features
are stored.
model_path : str
Prefix for the directory where the trained model will be saved.
The suffix ``.C{C_value}`` is added to create the actual directory
name.
eval_data : str
Path to the JSON file containing the documents on which to
evaluate the trained parser.
C_value : float
The value for the ``C`` hyperparameter to use for the
logistic regression model.
Returns
-------
results : dict
Dictionary containing the evaluation results.
"""
# get the full name of the model directory
logging.info(f"Training model with C = {C_value}")
model_path = f"{model_path}.C{C_value}"
# train a logistic regression parsing model
train_rst_parsing_model(working_path, model_path, C_value)
# instantiate a Parser container to hold the model
rst_parser = Parser(1, 1, 1)
rst_parser.load_model(model_path)
# evaluate the model on the given data
logging.info(f"Evaluating model with C = {C_value}")
results = predict_and_evaluate_rst_trees(None,
None,
rst_parser,
eval_data,
use_gold_syntax=True)
return results
def main(): # noqa: D103
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("train_file",
help="Path to the JSON training data.",
type=argparse.FileType('r'))
parser.add_argument("eval_file",
help="Path to the JSON development or test data for "
"tuning/evaluation.",
type=argparse.FileType('r'))
parser.add_argument("model_path",
help="A prefix for the path where the model file "
"should be saved. A suffix with the C value "
"will be added to create the full path.")
parser.add_argument("-w",
"--working_path",
help="Path to where intermediate files should be "
"stored.",
default=join(os.getcwd(), "working"))
parser.add_argument("-C",
"--C_values",
help="comma-separated list of model complexity "
"hyperparameter values to evaluate.",
default=','.join([str(2.0 ** x) for x in range(-4, 5)]))
parser.add_argument("-v",
"--verbose",
help="Print more status information. For every "
"additional time this flag is specified, "
"output gets more verbose.",
default=0,
action='count')
parser.add_argument("-s",
"--single_process",
action='store_true',
help="Run all hyperparameter values in a single "
"process, to simplify debugging.")
args = parser.parse_args()
# convert given paths to absolute paths
working_path = abspath(args.working_path)
model_path = abspath(args.model_path)
if exists(working_path):
raise IOError(f"{working_path} already exists. Stopping here "
f"to avoid the possibility of overwriting files that "
f"are currently being used.")
else:
os.makedirs(working_path)
# instantiate a parser container
parser = Parser(1, 1, 1)
# convert verbose flag to logging level
log_levels = [logging.WARNING, logging.INFO, logging.DEBUG]
log_level = log_levels[min(args.verbose, 2)]
# format warnings more nicely
logging.captureWarnings(True)
logging.basicConfig(format=("%(asctime)s - %(name)s - %(levelname)s - "
"%(message)s"),
level=log_level)
logger = logging.getLogger(__name__)
# extract the training examples
logger.info("Extracting examples")
train_data = json.load(args.train_file)
eval_data = json.load(args.eval_file)
train_examples = []
# iterate over each document in the training data
for doc_dict in train_data:
path_basename = doc_dict["path_basename"]
logging.info(f"Extracting examples for {path_basename}")
tree = ParentedTree.fromstring(doc_dict['rst_tree'])
collapse_rst_labels(tree)
actions = extract_parse_actions(tree)
# extract the training features
parser_tuples = parser.parse(doc_dict, gold_actions=actions)
for i, (action_str, feats) in enumerate(parser_tuples):
example_id = f"{path_basename}_{i}"
example = {"x": Counter(feats), "y": action_str, "id": example_id}
train_examples.append(example)
# save the training features to disk in SKLL jsonlines format
train_path = join(working_path, "rst_parsing.jsonlines")
with open(train_path, 'w') as train_file:
for example in train_examples:
train_file.write(f"{json.dumps(example)}\n")
# instantiate some variables
best_labeled_f1 = -1.0
best_C = None
# train and evaluate models with different C values in parallel
C_values = [float(x) for x in args.C_values.split(',')]
partial_trainer = partial(train_and_evaluate_model,
working_path,
model_path,
eval_data)
# run in a single process or using multiple processes
if args.single_process:
all_results = [partial_trainer(C_value) for C_value in C_values]
else:
n_workers = len(C_values)
with ProcessPoolExecutor(max_workers=n_workers) as executor:
all_results = executor.map(partial_trainer, C_values)
# find the C value that yields the best model
for C_value, results in zip(C_values, all_results):
results["C"] = C_value
print(json.dumps(sorted(results.items())))
if results["labeled_f1"] > best_labeled_f1:
best_labeled_f1 = results["labeled_f1"]
best_C = C_value
# print out the results for the best C
print(f"best labeled F1 = {best_labeled_f1}, with C = {best_C}")
if __name__ == "__main__":
main()
|
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pyasn1.sf.net/license.html
#
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
from pyasn1.type import base, univ, char, useful
from pyasn1 import debug, error
__all__ = ['encode']
class AbstractItemEncoder(object):
def encode(self, encodeFun, value):
raise error.PyAsn1Error('Not implemented')
class ExplicitlyTaggedItemEncoder(AbstractItemEncoder):
def encode(self, encodeFun, value):
if isinstance(value, base.AbstractConstructedAsn1Item):
value = value.clone(tagSet=value.getTagSet()[:-1],
cloneValueFlag=1)
else:
value = value.clone(tagSet=value.getTagSet()[:-1])
return encodeFun(value)
explicitlyTaggedItemEncoder = ExplicitlyTaggedItemEncoder()
class BooleanEncoder(AbstractItemEncoder):
def encode(self, encodeFun, value):
return bool(value)
class IntegerEncoder(AbstractItemEncoder):
def encode(self, encodeFun, value):
return int(value)
class BitStringEncoder(AbstractItemEncoder):
def encode(self, encodeFun, value):
return str(value)
class OctetStringEncoder(AbstractItemEncoder):
def encode(self, encodeFun, value):
return value.asOctets()
class TextStringEncoder(AbstractItemEncoder):
def encode(self, encodeFun, value):
return value.prettyPrint()
class NullEncoder(AbstractItemEncoder):
def encode(self, encodeFun, value):
return None
class ObjectIdentifierEncoder(AbstractItemEncoder):
def encode(self, encodeFun, value):
return str(value)
class RealEncoder(AbstractItemEncoder):
def encode(self, encodeFun, value):
return float(value)
class SetEncoder(AbstractItemEncoder):
protoDict = dict
def encode(self, encodeFun, value):
value.setDefaultComponents()
value.verifySizeSpec()
substrate = self.protoDict()
for key, subValue in value.items():
if subValue is None: # Optional component
continue
substrate[key] = encodeFun(subValue)
return substrate
class SequenceEncoder(SetEncoder):
protoDict = OrderedDict
class SequenceOfEncoder(AbstractItemEncoder):
def encode(self, encodeFun, value):
value.verifySizeSpec()
return [encodeFun(x) for x in value]
class ChoiceEncoder(SequenceEncoder):
pass
class AnyEncoder(AbstractItemEncoder):
def encode(self, encodeFun, value):
return value.asOctets()
tagMap = {
univ.Boolean.tagSet: BooleanEncoder(),
univ.Integer.tagSet: IntegerEncoder(),
univ.BitString.tagSet: BitStringEncoder(),
univ.OctetString.tagSet: OctetStringEncoder(),
univ.Null.tagSet: NullEncoder(),
univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
univ.Enumerated.tagSet: IntegerEncoder(),
univ.Real.tagSet: RealEncoder(),
# Sequence & Set have same tags as SequenceOf & SetOf
univ.SequenceOf.tagSet: SequenceOfEncoder(),
univ.SetOf.tagSet: SequenceOfEncoder(),
univ.Choice.tagSet: ChoiceEncoder(),
# character string types
char.UTF8String.tagSet: TextStringEncoder(),
char.NumericString.tagSet: TextStringEncoder(),
char.PrintableString.tagSet: TextStringEncoder(),
char.TeletexString.tagSet: TextStringEncoder(),
char.VideotexString.tagSet: TextStringEncoder(),
char.IA5String.tagSet: TextStringEncoder(),
char.GraphicString.tagSet: TextStringEncoder(),
char.VisibleString.tagSet: TextStringEncoder(),
char.GeneralString.tagSet: TextStringEncoder(),
char.UniversalString.tagSet: TextStringEncoder(),
char.BMPString.tagSet: TextStringEncoder(),
# useful types
useful.ObjectDescriptor.tagSet: OctetStringEncoder(),
useful.GeneralizedTime.tagSet: OctetStringEncoder(),
useful.UTCTime.tagSet: OctetStringEncoder()
}
# Type-to-codec map for ambiguous ASN.1 types
typeMap = {
univ.Set.typeId: SetEncoder(),
univ.SetOf.typeId: SequenceOfEncoder(),
univ.Sequence.typeId: SequenceEncoder(),
univ.SequenceOf.typeId: SequenceOfEncoder(),
univ.Choice.typeId: ChoiceEncoder(),
univ.Any.typeId: AnyEncoder()
}
class Encoder(object):
# noinspection PyDefaultArgument
def __init__(self, tagMap, typeMap={}):
self.__tagMap = tagMap
self.__typeMap = typeMap
def __call__(self, asn1Value):
if not isinstance(asn1Value, base.Asn1Item):
raise error.PyAsn1Error('value is not valid (should be an instance of an ASN.1 Item)')
if debug.logger & debug.flagEncoder:
debug.scope.push(type(asn1Value).__name__)
debug.logger('encoder called for type %s <%s>' % (type(asn1Value).__name__, asn1Value.prettyPrint()))
tagSet = asn1Value.getTagSet()
if len(tagSet) > 1:
concreteEncoder = explicitlyTaggedItemEncoder
else:
if asn1Value.typeId is not None and asn1Value.typeId in self.__typeMap:
concreteEncoder = self.__typeMap[asn1Value.typeId]
elif tagSet in self.__tagMap:
concreteEncoder = self.__tagMap[tagSet]
else:
tagSet = asn1Value.baseTagSet
if tagSet in self.__tagMap:
concreteEncoder = self.__tagMap[tagSet]
else:
raise error.PyAsn1Error('No encoder for %s' % (asn1Value,))
debug.logger & debug.flagEncoder and debug.logger('using value codec %s chosen by %s' % (type(concreteEncoder).__name__, tagSet))
pyObject = concreteEncoder.encode(self, asn1Value)
if debug.logger & debug.flagEncoder:
debug.logger('encoder %s produced: %s' % (type(concreteEncoder).__name__, repr(pyObject)))
debug.scope.pop()
return pyObject
#: Turns ASN.1 object into a Python built-in type object(s).
#:
#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: walks all its components recursively and produces a Python built-in type or a tree
#: of those.
#:
#: One exception is that instead of :py:class:`dict`, the :py:class:`OrderedDict`
#: can be produced (whenever available) to preserve ordering of the components
#: in ASN.1 SEQUENCE.
#:
#: Parameters
#: ----------
#: asn1Value: any pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: pyasn1 object to encode (or a tree of them)
#:
#: Returns
#: -------
#: : :py:class:`object`
#: Python built-in type instance (or a tree of them)
#:
#: Raises
#: ------
#: : :py:class:`pyasn1.error.PyAsn1Error`
#: On encoding errors
encode = Encoder(tagMap, typeMap)
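# Minimal usage sketch (not part of the original module): running this file
# directly encodes a few simple ASN.1 values into Python built-ins via the
# ``encode`` callable defined above.
if __name__ == '__main__':
    print(encode(univ.Integer(12)))         # -> 12
    print(encode(univ.Boolean(True)))       # -> True
    print(encode(univ.OctetString('abc')))  # -> the raw octets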
|
|
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from _collections_abc import Set as _Set, Sequence as _Sequence
from hashlib import sha512 as _sha512
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate", "getrandbits",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
def seed(self, a=None, version=2):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
For version 2 (the default), all of the bits are used if *a* is a str,
bytes, or bytearray. For version 1, the hash() of *a* is used instead.
If *a* is an int, all bits are used.
"""
if a is None:
try:
# Seed with enough bytes to span the 19937 bit
# state space for the Mersenne Twister
a = int.from_bytes(_urandom(2500), 'big')
except NotImplementedError:
import time
a = int(time.time() * 256) # use fractional seconds
if version == 2:
if isinstance(a, (str, bytes, bytearray)):
if isinstance(a, str):
a = a.encode()
a += _sha512(a).digest()
a = int.from_bytes(a, 'big')
super().seed(a)
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super().getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super().setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple(x % (2**32) for x in internalstate)
except ValueError as e:
raise TypeError from e
super().setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
# Issue 17489: Since __reduce__ was defined to fix #759889 this is no
# longer called; we leave it here because it has been here since random was
# rewritten back in 2001 and why risk breaking something.
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, _int=int):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = _int(start)
if istart != start:
raise ValueError("non-integer arg 1 for randrange()")
if stop is None:
if istart > 0:
return self._randbelow(istart)
raise ValueError("empty range for randrange()")
# stop argument supplied.
istop = _int(stop)
if istop != stop:
raise ValueError("non-integer stop for randrange()")
width = istop - istart
if step == 1 and width > 0:
return istart + self._randbelow(width)
if step == 1:
raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
# Non-unit step argument supplied.
istep = _int(step)
if istep != step:
raise ValueError("non-integer step for randrange()")
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError("zero step for randrange()")
if n <= 0:
raise ValueError("empty range for randrange()")
return istart + istep*self._randbelow(n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
"Return a random int in the range [0,n). Raises ValueError if n==0."
random = self.random
getrandbits = self.getrandbits
# Only call self.getrandbits if the original random() builtin method
# has not been overridden or if a new getrandbits() was supplied.
if type(random) is BuiltinMethod or type(getrandbits) is Method:
k = n.bit_length() # don't use (n-1) here because n can be 1
r = getrandbits(k) # 0 <= r < 2**k
while r >= n:
r = getrandbits(k)
return r
        # There's an overridden random() method but no new getrandbits() method,
# so we can only use random() from here.
if n >= maxsize:
_warn("Underlying random() generator does not supply \n"
"enough bits to choose from a population range this large.\n"
"To remove the range limitation, add a getrandbits() method.")
return int(random() * n)
rem = maxsize % n
limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0
r = random()
while r >= limit:
r = random()
return int(r*maxsize) % n
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
try:
i = self._randbelow(len(seq))
except ValueError:
raise IndexError('Cannot choose from an empty sequence')
return seq[i]
def shuffle(self, x, random=None):
"""Shuffle list x in place, and return None.
Optional argument random is a 0-argument function returning a
random float in [0.0, 1.0); if it is the default None, the
standard random.random will be used.
"""
if random is None:
randbelow = self._randbelow
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = randbelow(i+1)
x[i], x[j] = x[j], x[i]
else:
_int = int
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = _int(random() * (i+1))
x[i], x[j] = x[j], x[i]
def sample(self, population, k):
"""Chooses k unique random elements from a population sequence or set.
Returns a new list containing elements from the population while
leaving the original population unchanged. The resulting list is
in selection order so that all sub-slices will also be valid random
samples. This allows raffle winners (the sample) to be partitioned
into grand prize and second place winners (the subslices).
Members of the population need not be hashable or unique. If the
population contains repeats, then each occurrence is a possible
selection in the sample.
To choose a sample in a range of integers, use range as an argument.
This is especially fast and space efficient for sampling from a
large population: sample(range(10000000), 60)
"""
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
# When the number of selections is small compared to the
# population, then tracking selections is efficient, requiring
# only a small set and an occasional reselection. For
# a larger number of selections, the pool tracking method is
# preferred since the list takes less space than the
# set and it doesn't suffer from frequent reselections.
if isinstance(population, _Set):
population = tuple(population)
if not isinstance(population, _Sequence):
raise TypeError("Population must be a sequence or set. For dicts, use list(d).")
randbelow = self._randbelow
n = len(population)
if not 0 <= k <= n:
raise ValueError("Sample larger than population")
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize:
# An n-length list is smaller than a k-length set
pool = list(population)
for i in range(k): # invariant: non-selected at [0,n-i)
j = randbelow(n-i)
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
selected = set()
selected_add = selected.add
for i in range(k):
j = randbelow(n)
while j in selected:
j = randbelow(n)
selected_add(j)
result[i] = population[j]
return result
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
return a + (b-a) * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
"""Triangular distribution.
Continuous distribution bounded by given lower and upper limits,
and having a given mode value in-between.
http://en.wikipedia.org/wiki/Triangular_distribution
"""
u = self.random()
c = 0.5 if mode is None else (mode - low) / (high - low)
if u > c:
u = 1.0 - u
c = 1.0 - c
low, high = high, low
return low + (high - low) * (u * c) ** 0.5
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
while 1:
u1 = random()
u2 = 1.0 - random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
# we use 1-random() instead of random() to preclude the
# possibility of taking the log of zero.
return -_log(1.0 - self.random())/lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
mu is the mean angle, expressed in radians between 0 and 2*pi, and
kappa is the concentration parameter, which must be greater than or
equal to zero. If kappa is equal to zero, this distribution reduces
to a uniform random angle over the range 0 to 2*pi.
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
random = self.random
if kappa <= 1e-6:
return TWOPI * random()
s = 0.5 / kappa
r = s + _sqrt(1.0 + s * s)
while 1:
u1 = random()
z = _cos(_pi * u1)
d = z / (r + z)
u2 = random()
if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
break
q = 1.0 / r
f = (q + z) / (1.0 + q * z)
u3 = random()
if u3 > 0.5:
theta = (mu + _acos(f)) % TWOPI
else:
theta = (mu - _acos(f)) % TWOPI
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
The probability distribution function is:
x ** (alpha - 1) * math.exp(-x / beta)
pdf(x) = --------------------------------------
math.gamma(alpha) * beta ** alpha
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError('gammavariate: alpha and beta must be > 0.0')
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
"""Gaussian distribution.
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
Not thread-safe without a lock around calls.
"""
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / u ** (1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * (-_log(u)) ** (1.0/beta)
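# Illustrative sketch (not part of the original module): drawing a few values
# from the distribution methods defined on Random above. The parameter values
# are arbitrary examples chosen for the demo.
def _distribution_demo_sketch(n=5):
    rng = Random(12345)  # seeded instance, so the output is reproducible
    return {
        'uniform': [rng.uniform(0.0, 10.0) for _ in range(n)],
        'normal': [rng.normalvariate(0.0, 1.0) for _ in range(n)],
        'exponential': [rng.expovariate(1.0 / 5.0) for _ in range(n)],  # mean ~5
        'gamma': [rng.gammavariate(2.0, 3.0) for _ in range(n)],  # alpha=2, beta=3
        'triangular': [rng.triangular(0.0, 1.0, 0.25) for _ in range(n)],
    }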
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
"""Alternate random number generator using sources provided
by the operating system (such as /dev/urandom on Unix or
CryptGenRandom on Windows).
Not available on all systems (see os.urandom() for details).
"""
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates an int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
numbytes = (k + 7) // 8 # bits / 8 and rounded up
x = int.from_bytes(_urandom(numbytes), 'big')
return x >> (numbytes * 8 - k) # trim excess bits
def seed(self, *args, **kwds):
"Stub method. Not used for a system random number generator."
return None
def _notimplemented(self, *args, **kwds):
"Method should not be called for a system random number generator."
raise NotImplementedError('System entropy source does not have state.')
getstate = setstate = _notimplemented
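# Illustrative sketch (not part of the original module): SystemRandom is the
# right choice when values must not be reproducible from a seed, e.g. a random
# hex token. The 128-bit width below is just an example.
def _os_random_token_sketch(nbits=128):
    sysrand = SystemRandom()
    # getrandbits() reads from os.urandom(), so the result is unaffected by
    # seed(); getstate()/setstate() are deliberately unsupported here.
    return '%0*x' % ((nbits + 3) // 4, sysrand.getrandbits(nbits))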
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
_test_generator(N, random, ())
_test_generator(N, normalvariate, (0.0, 1.0))
_test_generator(N, lognormvariate, (0.0, 1.0))
_test_generator(N, vonmisesvariate, (0.0, 1.0))
_test_generator(N, gammavariate, (0.01, 1.0))
_test_generator(N, gammavariate, (0.1, 1.0))
_test_generator(N, gammavariate, (0.1, 2.0))
_test_generator(N, gammavariate, (0.5, 1.0))
_test_generator(N, gammavariate, (0.9, 1.0))
_test_generator(N, gammavariate, (1.0, 1.0))
_test_generator(N, gammavariate, (2.0, 1.0))
_test_generator(N, gammavariate, (20.0, 1.0))
_test_generator(N, gammavariate, (200.0, 1.0))
_test_generator(N, gauss, (0.0, 1.0))
_test_generator(N, betavariate, (3.0, 3.0))
_test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
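# Illustrative usage of the module-level API above (not part of the original
# module): because these functions share the hidden _inst state, seeding once
# makes every later call reproducible, e.g.
#
#     seed(42)
#     first = [random() for _ in range(3)]
#     seed(42)
#     assert [random() for _ in range(3)] == first
#
# Programs that need independent streams should instantiate their own Random()
# (or SystemRandom()) objects instead of relying on the shared instance.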
if __name__ == '__main__':
_test()
|
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
from boto.exception import JSONResponseError
from boto.connection import AWSAuthConnection
from boto.regioninfo import RegionInfo
from boto.elastictranscoder import exceptions
class ElasticTranscoderConnection(AWSAuthConnection):
"""
AWS Elastic Transcoder Service
The AWS Elastic Transcoder Service.
"""
APIVersion = "2012-09-25"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "elastictranscoder.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"IncompatibleVersionException": exceptions.IncompatibleVersionException,
"LimitExceededException": exceptions.LimitExceededException,
"ResourceInUseException": exceptions.ResourceInUseException,
"AccessDeniedException": exceptions.AccessDeniedException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalServiceException": exceptions.InternalServiceException,
"ValidationException": exceptions.ValidationException,
}
def __init__(self, **kwargs):
region = kwargs.get('region')
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
else:
del kwargs['region']
kwargs['host'] = region.endpoint
AWSAuthConnection.__init__(self, **kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def cancel_job(self, id=None):
"""
To cancel a job, send a DELETE request to the
`/2012-09-25/jobs/ [jobId] ` resource.
You can only cancel a job that has a status of `Submitted`. To
prevent a pipeline from starting to process a job while you're
getting the job identifier, use UpdatePipelineStatus to
temporarily pause the pipeline.
:type id: string
:param id: The identifier of the job that you want to cancel.
To get a list of the jobs (including their `jobId`) that have a status
of `Submitted`, use the ListJobsByStatus API action.
"""
uri = '/2012-09-25/jobs/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def create_job(self, pipeline_id=None, input_name=None, output=None,
outputs=None, output_key_prefix=None, playlists=None):
"""
To create a job, send a POST request to the `/2012-09-25/jobs`
resource.
When you create a job, Elastic Transcoder returns JSON data
that includes the values that you specified plus information
about the job that is created.
If you have specified more than one output for your jobs (for
example, one output for the Kindle Fire and another output for
the Apple iPhone 4s), you currently must use the Elastic
Transcoder API to list the jobs (as opposed to the AWS
Console).
:type pipeline_id: string
:param pipeline_id: The `Id` of the pipeline that you want Elastic
Transcoder to use for transcoding. The pipeline determines several
settings, including the Amazon S3 bucket from which Elastic
Transcoder gets the files to transcode and the bucket into which
Elastic Transcoder puts the transcoded files.
:type input_name: dict
:param input_name: A section of the request body that provides
information about the file that is being transcoded.
:type output: dict
:param output:
:type outputs: list
:param outputs: A section of the request body that provides information
about the transcoded (target) files. We recommend that you use the
`Outputs` syntax instead of the `Output` syntax.
:type output_key_prefix: string
:param output_key_prefix: The value, if any, that you want Elastic
Transcoder to prepend to the names of all files that this job
creates, including output files, thumbnails, and playlists.
:type playlists: list
:param playlists: If you specify a preset in `PresetId` for which the
value of `Container` is ts (MPEG-TS), Playlists contains
information about the master playlists that you want Elastic
Transcoder to create.
We recommend that you create only one master playlist. The maximum
number of master playlists in a job is 30.
"""
uri = '/2012-09-25/jobs'
params = {}
if pipeline_id is not None:
params['PipelineId'] = pipeline_id
if input_name is not None:
params['Input'] = input_name
if output is not None:
params['Output'] = output
if outputs is not None:
params['Outputs'] = outputs
if output_key_prefix is not None:
params['OutputKeyPrefix'] = output_key_prefix
if playlists is not None:
params['Playlists'] = playlists
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
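# Illustrative call shape (not from the original source). The keys inside the
# Input/Outputs dicts follow the Elastic Transcoder REST API and are shown here
# only as an assumed example; the ids are placeholders.
#
#     conn.create_job(
#         pipeline_id='1111111111111-abcde1',
#         input_name={'Key': 'inputs/movie.mov'},
#         outputs=[{'Key': 'outputs/movie.mp4',
#                   'PresetId': '1351620000001-000010'}],
#         output_key_prefix='transcoded/',
#     )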
def create_pipeline(self, name=None, input_bucket=None,
output_bucket=None, role=None, notifications=None,
content_config=None, thumbnail_config=None):
"""
To create a pipeline, send a POST request to the
`2012-09-25/pipelines` resource.
:type name: string
:param name: The name of the pipeline. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
Constraints: Maximum 40 characters.
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket in which you saved the media
files that you want to transcode.
:type output_bucket: string
:param output_bucket: The Amazon S3 bucket in which you want Elastic
Transcoder to save the transcoded files. (Use this, or use
ContentConfig:Bucket plus ThumbnailConfig:Bucket.)
Specify this value when all of the following are true:
+ You want to save transcoded files, thumbnails (if any), and playlists
(if any) together in one bucket.
+ You do not want to specify the users or groups who have access to the
transcoded files, thumbnails, and playlists.
+ You do not want to specify the permissions that Elastic Transcoder
grants to the files. When Elastic Transcoder saves files in
`OutputBucket`, it grants full control over the files only to the
AWS account that owns the role that is specified by `Role`.
+ You want to associate the transcoded files and thumbnails with the
Amazon S3 Standard storage class.
If you want to save transcoded files and playlists in one bucket and
thumbnails in another bucket, specify which users can access the
transcoded files or the permissions the users have, or change the
Amazon S3 storage class, omit `OutputBucket` and specify values for
`ContentConfig` and `ThumbnailConfig` instead.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to use to create the pipeline.
:type notifications: dict
:param notifications:
The Amazon Simple Notification Service (Amazon SNS) topic that you want
to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
+ **Progressing**: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process a job in this pipeline. This is
the ARN that Amazon SNS returned when you created the topic. For
more information, see Create a Topic in the Amazon Simple
Notification Service Developer Guide.
+ **Completed**: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job in
this pipeline. This is the ARN that Amazon SNS returned when you
created the topic.
+ **Warning**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition while
processing a job in this pipeline. This is the ARN that Amazon SNS
returned when you created the topic.
+ **Error**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition while
processing a job in this pipeline. This is the ARN that Amazon SNS
returned when you created the topic.
:type content_config: dict
:param content_config:
The optional `ContentConfig` object specifies information about the
Amazon S3 bucket in which you want Elastic Transcoder to save
transcoded files and playlists: which bucket to use, which users
you want to have access to the files, the type of access you want
users to have, and the storage class that you want to assign to the
files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig`.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save transcoded files and playlists.
+ **Permissions** (Optional): The Permissions object specifies which
users you want to have access to transcoded files and the type of
access you want them to have. You can grant permissions to a
maximum of 30 users and/or predefined Amazon S3 groups.
+ **Grantee Type**: Specify the type of value that appears in the
`Grantee` object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. For more information about
canonical user IDs, see Access Control List (ACL) Overview in the
Amazon Simple Storage Service Developer Guide. For more information
about using CloudFront origin access identities to require that
users use CloudFront URLs instead of Amazon S3 URLs, see Using an
Origin Access Identity to Restrict Access to Your Amazon S3
Content. A canonical user ID is not the same as an AWS account
number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
transcoded files and playlists. To identify the user or group, you
can specify the canonical user ID for an AWS account, an origin
access identity for a CloudFront distribution, the registered email
address of an AWS account, or a predefined Amazon S3 group
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the files
that Elastic Transcoder adds to the bucket, including playlists and
video files. Valid values include:
+ `READ`: The grantee can read the objects and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the objects that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the video files and playlists that it stores in your Amazon S3
bucket.
:type thumbnail_config: dict
:param thumbnail_config:
The `ThumbnailConfig` object specifies several values, including the
Amazon S3 bucket in which you want Elastic Transcoder to save
thumbnail files, which users you want to have access to the files,
the type of access you want users to have, and the storage class
that you want to assign to the files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig` even if you don't want to create thumbnails.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save thumbnail files.
+ **Permissions** (Optional): The `Permissions` object specifies which
users and/or predefined Amazon S3 groups you want to have access to
thumbnail files, and the type of access you want them to have. You
can grant permissions to a maximum of 30 users and/or predefined
Amazon S3 groups.
+ **GranteeType**: Specify the type of value that appears in the
Grantee object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. A canonical user ID is not
the same as an AWS account number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
thumbnail files. To identify the user or group, you can specify the
canonical user ID for an AWS account, an origin access identity for
a CloudFront distribution, the registered email address of an AWS
account, or a predefined Amazon S3 group.
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the
thumbnail files that Elastic Transcoder adds to the bucket. Valid
values include:
+ `READ`: The grantee can read the thumbnails and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the thumbnails that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the thumbnails that it stores in your Amazon S3 bucket.
"""
uri = '/2012-09-25/pipelines'
params = {}
if name is not None:
params['Name'] = name
if input_bucket is not None:
params['InputBucket'] = input_bucket
if output_bucket is not None:
params['OutputBucket'] = output_bucket
if role is not None:
params['Role'] = role
if notifications is not None:
params['Notifications'] = notifications
if content_config is not None:
params['ContentConfig'] = content_config
if thumbnail_config is not None:
params['ThumbnailConfig'] = thumbnail_config
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
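# Illustrative call shape (not from the original source); the bucket names,
# role ARN and notification topics below are placeholders (empty strings are
# assumed to mean "no notification" for that event).
#
#     conn.create_pipeline(
#         name='my-pipeline',
#         input_bucket='my-input-bucket',
#         output_bucket='my-output-bucket',
#         role='arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role',
#         notifications={'Progressing': '', 'Completed': '',
#                        'Warning': '', 'Error': ''},
#     )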
def create_preset(self, name=None, description=None, container=None,
video=None, audio=None, thumbnails=None):
"""
To create a preset, send a POST request to the
`/2012-09-25/presets` resource.
Elastic Transcoder checks the settings that you specify to
ensure that they meet Elastic Transcoder requirements and to
determine whether they comply with H.264 standards. If your
settings are not valid for Elastic Transcoder, Elastic
Transcoder returns an HTTP 400 response (
`ValidationException`) and does not create the preset. If the
settings are valid for Elastic Transcoder but aren't strictly
compliant with the H.264 standard, Elastic Transcoder creates
the preset and returns a warning message in the response. This
helps you determine whether your settings comply with the
H.264 standard while giving you greater flexibility with
respect to the video that Elastic Transcoder produces.
Elastic Transcoder uses the H.264 video-compression format.
For more information, see the International Telecommunication
Union publication Recommendation ITU-T H.264: Advanced video
coding for generic audiovisual services .
:type name: string
:param name: The name of the preset. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
:type description: string
:param description: A description of the preset.
:type container: string
:param container: The container type for the output file. This value
must be `mp4`.
:type video: dict
:param video: A section of the request body that specifies the video
parameters.
:type audio: dict
:param audio: A section of the request body that specifies the audio
parameters.
:type thumbnails: dict
:param thumbnails: A section of the request body that specifies the
thumbnail parameters, if any.
"""
uri = '/2012-09-25/presets'
params = {}
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
if container is not None:
params['Container'] = container
if video is not None:
params['Video'] = video
if audio is not None:
params['Audio'] = audio
if thumbnails is not None:
params['Thumbnails'] = thumbnails
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
def delete_pipeline(self, id=None):
"""
To delete a pipeline, send a DELETE request to the
`/2012-09-25/pipelines/ [pipelineId] ` resource.
You can only delete a pipeline that has never been used or
that is not currently in use (doesn't contain any active
jobs). If the pipeline is currently in use, `DeletePipeline`
returns an error.
:type id: string
:param id: The identifier of the pipeline that you want to delete.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def delete_preset(self, id=None):
"""
To delete a preset, send a DELETE request to the
`/2012-09-25/presets/ [presetId] ` resource.
If the preset has been used, you cannot delete it.
:type id: string
:param id: The identifier of the preset that you want to delete.
"""
uri = '/2012-09-25/presets/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def list_jobs_by_pipeline(self, pipeline_id=None, ascending=None,
page_token=None):
"""
To get a list of the jobs currently in a pipeline, send a GET
request to the `/2012-09-25/jobsByPipeline/ [pipelineId] `
resource.
Elastic Transcoder returns all of the jobs currently in the
specified pipeline. The response body contains one element for
each job that satisfies the search criteria.
:type pipeline_id: string
:param pipeline_id: The ID of the pipeline for which you want to get
job information.
:type ascending: string
:param ascending: To list jobs in chronological order by the date and
time that they were submitted, enter `True`. To list jobs in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/jobsByPipeline/{0}'.format(pipeline_id)
params = {}
if pipeline_id is not None:
params['PipelineId'] = pipeline_id
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def list_jobs_by_status(self, status=None, ascending=None,
page_token=None):
"""
To get a list of the jobs that have a specified status, send a
GET request to the `/2012-09-25/jobsByStatus/ [status] `
resource.
Elastic Transcoder returns all of the jobs that have the
specified status. The response body contains one element for
each job that satisfies the search criteria.
:type status: string
:param status: To get information about all of the jobs associated with
the current AWS account that have a given status, specify the
following status: `Submitted`, `Progressing`, `Complete`,
`Canceled`, or `Error`.
:type ascending: string
:param ascending: To list jobs in chronological order by the date and
time that they were submitted, enter `True`. To list jobs in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/jobsByStatus/{0}'.format(status)
params = {}
if status is not None:
params['Status'] = status
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
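# Illustrative pagination sketch (not from the original source); it assumes the
# parsed response carries 'Jobs' and 'NextPageToken' keys, as documented for
# the Elastic Transcoder ListJobsByStatus API.
#
#     jobs, token = [], None
#     while True:
#         page = conn.list_jobs_by_status(status='Complete', page_token=token)
#         jobs.extend(page.get('Jobs', []))
#         token = page.get('NextPageToken')
#         if not token:
#             break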
def list_pipelines(self):
"""
To get a list of the pipelines associated with the current AWS
account, send a GET request to the `/2012-09-25/pipelines`
resource.
"""
uri = '/2012-09-25/pipelines'
return self.make_request('GET', uri, expected_status=200)
def list_presets(self):
"""
To get a list of all presets associated with the current AWS
account, send a GET request to the `/2012-09-25/presets`
resource.
"""
uri = '/2012-09-25/presets'
return self.make_request('GET', uri, expected_status=200)
def read_job(self, id=None):
"""
To get detailed information about a job, send a GET request to
the `/2012-09-25/jobs/ [jobId] ` resource.
:type id: string
:param id: The identifier of the job for which you want to get detailed
information.
"""
uri = '/2012-09-25/jobs/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def read_pipeline(self, id=None):
"""
To get detailed information about a pipeline, send a GET
request to the `/2012-09-25/pipelines/ [pipelineId] `
resource.
:type id: string
:param id: The identifier of the pipeline to read.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def read_preset(self, id=None):
"""
To get detailed information about a preset, send a GET request
to the `/2012-09-25/presets/ [presetId] ` resource.
:type id: string
:param id: The identifier of the preset for which you want to get
detailed information.
"""
uri = '/2012-09-25/presets/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def test_role(self, role=None, input_bucket=None, output_bucket=None,
topics=None):
"""
To test the IAM role that's used by Elastic Transcoder to
create the pipeline, send a POST request to the
`/2012-09-25/roleTests` resource.
The `TestRole` action lets you determine whether the IAM role
you are using has sufficient permissions to let Elastic
Transcoder perform tasks associated with the transcoding
process. The action attempts to assume the specified IAM role,
checks read access to the input and output buckets, and tries
to send a test notification to Amazon SNS topics that you
specify.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to test.
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket that contains media files to
be transcoded. The action attempts to read from this bucket.
:type output_bucket: string
:param output_bucket: The Amazon S3 bucket that Elastic Transcoder will
write transcoded media files to. The action attempts to read from
this bucket.
:type topics: list
:param topics: The ARNs of one or more Amazon Simple Notification
Service (Amazon SNS) topics that you want the action to send a test
notification to.
"""
uri = '/2012-09-25/roleTests'
params = {}
if role is not None:
params['Role'] = role
if input_bucket is not None:
params['InputBucket'] = input_bucket
if output_bucket is not None:
params['OutputBucket'] = output_bucket
if topics is not None:
params['Topics'] = topics
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline(self, id, name=None, input_bucket=None, role=None,
notifications=None, content_config=None,
thumbnail_config=None):
"""
:type id: string
:param id:
:type name: string
:param name:
:type input_bucket: string
:param input_bucket:
:type role: string
:param role:
:type notifications: dict
:param notifications:
:type content_config: dict
:param content_config:
:type thumbnail_config: dict
:param thumbnail_config:
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
params = {}
if name is not None:
params['Name'] = name
if input_bucket is not None:
params['InputBucket'] = input_bucket
if role is not None:
params['Role'] = role
if notifications is not None:
params['Notifications'] = notifications
if content_config is not None:
params['ContentConfig'] = content_config
if thumbnail_config is not None:
params['ThumbnailConfig'] = thumbnail_config
return self.make_request('PUT', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline_notifications(self, id=None, notifications=None):
"""
To update Amazon Simple Notification Service (Amazon SNS)
notifications for a pipeline, send a POST request to the
`/2012-09-25/pipelines/ [pipelineId] /notifications` resource.
When you update notifications for a pipeline, Elastic
Transcoder returns the values that you specified in the
request.
:type id: string
:param id: The identifier of the pipeline for which you want to change
notification settings.
:type notifications: dict
:param notifications:
The topic ARN for the Amazon Simple Notification Service (Amazon SNS)
topic that you want to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
+ **Progressing**: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process jobs that are added to this
pipeline. This is the ARN that Amazon SNS returned when you created
the topic.
+ **Completed**: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job.
This is the ARN that Amazon SNS returned when you created the
topic.
+ **Warning**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition. This
is the ARN that Amazon SNS returned when you created the topic.
+ **Error**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition. This
is the ARN that Amazon SNS returned when you created the topic.
"""
uri = '/2012-09-25/pipelines/{0}/notifications'.format(id)
params = {}
if id is not None:
params['Id'] = id
if notifications is not None:
params['Notifications'] = notifications
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline_status(self, id=None, status=None):
"""
To pause or reactivate a pipeline, so the pipeline stops or
restarts processing jobs, update the status for the pipeline.
Send a POST request to the `/2012-09-25/pipelines/
[pipelineId] /status` resource.
Changing the pipeline status is useful if you want to cancel
one or more jobs. You can't cancel jobs after Elastic
Transcoder has started processing them; if you pause the
pipeline to which you submitted the jobs, you have more time
to get the job IDs for the jobs that you want to cancel, and
to send a CancelJob request.
:type id: string
:param id: The identifier of the pipeline to update.
:type status: string
:param status:
The desired status of the pipeline:
+ `Active`: The pipeline is processing jobs.
+ `Paused`: The pipeline is not currently processing jobs.
"""
uri = '/2012-09-25/pipelines/{0}/status'.format(id)
params = {}
if id is not None:
params['Id'] = id
if status is not None:
params['Status'] = status
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def make_request(self, verb, resource, headers=None, data='',
expected_status=None, params=None):
if headers is None:
headers = {}
response = AWSAuthConnection.make_request(
self, verb, resource, headers=headers, data=data)
body = json.load(response)
if response.status == expected_status:
return body
else:
error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
error_class = self._faults.get(error_type, self.ResponseError)
raise error_class(response.status, response.reason, body)
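# Illustrative sketch (not part of the original module): wiring the connection
# up and issuing a couple of read-only calls. The credentials are placeholders;
# nothing runs until the function is actually called.
def _elastictranscoder_demo_sketch():
    conn = ElasticTranscoderConnection(
        aws_access_key_id='AKIAEXAMPLEEXAMPLE',
        aws_secret_access_key='secret-key-placeholder')
    pipelines = conn.list_pipelines()  # GET /2012-09-25/pipelines
    presets = conn.list_presets()      # GET /2012-09-25/presets
    return pipelines, presets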
|
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import logging
import time
import threading
from botocore.vendored.requests.adapters import HTTPAdapter
from botocore.vendored.requests.sessions import Session
from botocore.vendored.requests.utils import get_environ_proxies
from botocore.vendored.requests.exceptions import ConnectionError
from botocore.vendored import six
from botocore.awsrequest import create_request_object
from botocore.exceptions import UnknownEndpointError
from botocore.exceptions import EndpointConnectionError
from botocore.exceptions import ConnectionClosedError
from botocore.compat import filter_ssl_warnings
from botocore.utils import is_valid_endpoint_url
from botocore.hooks import first_non_none_response
from botocore.response import StreamingBody
from botocore import parsers
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
filter_ssl_warnings()
try:
from botocore.vendored.requests.packages.urllib3.contrib import pyopenssl
pyopenssl.extract_from_urllib3()
except ImportError:
pass
def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.models.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
'headers': http_response.headers,
'status_code': http_response.status_code,
}
if response_dict['status_code'] >= 300:
response_dict['body'] = http_response.content
elif operation_model.has_streaming_output:
response_dict['body'] = StreamingBody(
http_response.raw, response_dict['headers'].get('content-length'))
else:
response_dict['body'] = http_response.content
return response_dict
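# Illustrative shape of the returned dict (not from the original source):
#
#     {'headers': {'content-type': 'application/x-amz-json-1.1', ...},
#      'status_code': 200,
#      'body': b'{"TableNames": []}'}
#
# For operations with a streaming output shape, 'body' is instead a
# StreamingBody wrapping the raw socket rather than the already-read bytes.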
class BotocoreHTTPSession(Session):
"""Internal session class used to workaround requests behavior.
This class is intended to be used only by the Endpoint class.
"""
def __init__(self, max_pool_connections=MAX_POOL_CONNECTIONS,
http_adapter_cls=HTTPAdapter):
super(BotocoreHTTPSession, self).__init__()
# In order to support a user provided "max_pool_connections", we need
# to recreate the HTTPAdapter and pass in our max_pool_connections
# value.
adapter = http_adapter_cls(pool_maxsize=max_pool_connections)
# requests uses an HTTPAdapter for mounting both http:// and https://
self.mount('https://', adapter)
self.mount('http://', adapter)
def rebuild_auth(self, prepared_request, response):
# Keep the existing auth information from the original prepared request.
# Normally this method would be where auth is regenerated as needed.
# By making this a noop, we're keeping the existing auth info.
pass
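# Illustrative sketch (not part of the original module): building the internal
# session with a larger connection pool. Endpoint exposes the same knob through
# its max_pool_connections argument; this only shows what the adapter mounting
# above amounts to.
def _pooled_session_sketch(pool_size=50):
    session = BotocoreHTTPSession(max_pool_connections=pool_size)
    # Both schemes resolve to the same HTTPAdapter configuration.
    return (session.get_adapter('https://example.com'),
            session.get_adapter('http://example.com'))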
class Endpoint(object):
"""
Represents an endpoint for a particular service in a specific
region. Only an endpoint can make requests.
:ivar service: The Service object that describes this endpoints
service.
:ivar host: The fully qualified endpoint hostname.
:ivar session: The session object.
"""
def __init__(self, host, endpoint_prefix,
event_emitter, proxies=None, verify=True,
timeout=DEFAULT_TIMEOUT, response_parser_factory=None,
max_pool_connections=MAX_POOL_CONNECTIONS):
self._endpoint_prefix = endpoint_prefix
self._event_emitter = event_emitter
self.host = host
self.verify = verify
if proxies is None:
proxies = {}
self.proxies = proxies
self.http_session = BotocoreHTTPSession(
max_pool_connections=max_pool_connections)
self.timeout = timeout
self.max_pool_connections = max_pool_connections
logger.debug('Setting %s timeout as %s', endpoint_prefix, self.timeout)
self._lock = threading.Lock()
if response_parser_factory is None:
response_parser_factory = parsers.ResponseParserFactory()
self._response_parser_factory = response_parser_factory
def __repr__(self):
return '%s(%s)' % (self._endpoint_prefix, self.host)
def make_request(self, operation_model, request_dict):
logger.debug("Making request for %s (verify_ssl=%s) with params: %s",
operation_model, self.verify, request_dict)
return self._send_request(request_dict, operation_model)
def create_request(self, params, operation_model=None):
request = create_request_object(params)
if operation_model:
event_name = 'request-created.{endpoint_prefix}.{op_name}'.format(
endpoint_prefix=self._endpoint_prefix,
op_name=operation_model.name)
self._event_emitter.emit(event_name, request=request,
operation_name=operation_model.name)
prepared_request = self.prepare_request(request)
return prepared_request
def _encode_headers(self, headers):
# In place encoding of headers to utf-8 if they are unicode.
for key, value in headers.items():
if isinstance(value, six.text_type):
headers[key] = value.encode('utf-8')
def prepare_request(self, request):
self._encode_headers(request.headers)
return request.prepare()
def _send_request(self, request_dict, operation_model):
attempts = 1
request = self.create_request(request_dict, operation_model)
success_response, exception = self._get_response(
request, operation_model, attempts)
while self._needs_retry(attempts, operation_model, request_dict,
success_response, exception):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = self.create_request(
request_dict, operation_model)
success_response, exception = self._get_response(
request, operation_model, attempts)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
def _get_response(self, request, operation_model, attempts):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
try:
logger.debug("Sending http request: %s", request)
http_response = self.http_session.send(
request, verify=self.verify,
stream=operation_model.has_streaming_output,
proxies=self.proxies, timeout=self.timeout)
except ConnectionError as e:
# For a connection error, if it looks like it's a DNS
# lookup issue, 99% of the time this is due to a misconfigured
# region/endpoint so we'll raise a more specific error message
# to help users.
logger.debug("ConnectionError received when sending HTTP request.",
exc_info=True)
if self._looks_like_dns_error(e):
endpoint_url = e.request.url
better_exception = EndpointConnectionError(
endpoint_url=endpoint_url, error=e)
return (None, better_exception)
elif self._looks_like_bad_status_line(e):
better_exception = ConnectionClosedError(
endpoint_url=e.request.url, request=e.request)
return (None, better_exception)
else:
return (None, e)
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return (None, e)
# This returns the http_response and the parsed_data.
response_dict = convert_to_response_dict(http_response,
operation_model)
parser = self._response_parser_factory.create_parser(
operation_model.metadata['protocol'])
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
return (http_response, parsed_response), None
def _looks_like_dns_error(self, e):
return 'gaierror' in str(e) and e.request is not None
def _looks_like_bad_status_line(self, e):
return 'BadStatusLine' in str(e) and e.request is not None
def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
event_name = 'needs-retry.%s.%s' % (self._endpoint_prefix,
operation_model.name)
responses = self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
else:
# Request needs to be retried, and we need to sleep
# for the specified number of times.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
time.sleep(handler_response)
return True
class EndpointCreator(object):
def __init__(self, event_emitter):
self._event_emitter = event_emitter
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
return Endpoint(
endpoint_url,
endpoint_prefix=service_model.endpoint_prefix,
event_emitter=self._event_emitter,
proxies=self._get_proxies(endpoint_url),
verify=self._get_verify_value(verify),
timeout=timeout,
max_pool_connections=max_pool_connections,
response_parser_factory=response_parser_factory)
def _get_proxies(self, url):
# We could also support getting proxies from a config file,
# but for now proxy support is taken from the environment.
return get_environ_proxies(url)
def _get_verify_value(self, verify):
# This is to account for:
# https://github.com/kennethreitz/requests/issues/1436
# where we need to honor REQUESTS_CA_BUNDLE because we're creating our
# own request objects.
# First, if verify is not None, then the user explicitly specified
# a value so this automatically wins.
if verify is not None:
return verify
# Otherwise use the value from REQUESTS_CA_BUNDLE, or default to
# True if the env var does not exist.
return os.environ.get('REQUESTS_CA_BUNDLE', True)
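# Illustrative sketch (not part of the original module): creating an Endpoint
# through EndpointCreator. HierarchicalEmitter comes from botocore.hooks; the
# stand-in service model below is an assumption for demo purposes and only
# provides the one attribute the creator reads (endpoint_prefix).
def _endpoint_creator_sketch():
    from botocore.hooks import HierarchicalEmitter

    class _FakeServiceModel(object):
        endpoint_prefix = 's3'

    creator = EndpointCreator(HierarchicalEmitter())
    endpoint = creator.create_endpoint(
        _FakeServiceModel(), region_name='us-east-1',
        endpoint_url='https://s3.us-east-1.amazonaws.com',
        timeout=30, max_pool_connections=20)
    return endpoint  # no request is sent until make_request() is called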
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The obfs3 module implements the obfs3 protocol.
"""
import random
import obfsproxy.common.aes as aes
import obfsproxy.transports.base as base
import obfsproxy.transports.obfs3_dh as obfs3_dh
import obfsproxy.common.log as logging
import obfsproxy.common.hmac_sha256 as hmac_sha256
import obfsproxy.common.rand as rand
log = logging.get_obfslogger()
MAX_PADDING = 8194
PUBKEY_LEN = 192
KEYLEN = 16 # length of the key used by E(K,s), in bytes
HASHLEN = 32 # length of output of sha256
ST_WAIT_FOR_KEY = 0 # Waiting for public key from the other party
ST_SEARCHING_MAGIC = 1 # Waiting for magic strings from the other party
ST_OPEN = 2 # obfs3 handshake is complete. Sending application data.
class Obfs3Transport(base.BaseTransport):
"""
Obfs3Transport implements the obfs3 protocol.
"""
def __init__(self):
"""Initialize the obfs3 pluggable transport."""
super(Obfs3Transport, self).__init__()
# Our state.
self.state = ST_WAIT_FOR_KEY
# Uniform-DH object
self.dh = obfs3_dh.UniformDH()
# DH shared secret
self.shared_secret = None
# Bytes of padding scanned so far.
self.scanned_padding = 0
# Last padding bytes scanned.
self.last_padding_chunk = ''
# Magic value that the other party is going to send
# (initialized after deriving shared secret)
self.other_magic_value = None
# Crypto to encrypt outgoing data.
self.send_crypto = None
# Crypto to decrypt incoming data.
self.recv_crypto = None
# Buffer for the first data that Tor is trying to send but can't right now
# because we have to handle the DH handshake first.
self.queued_data = ''
# Attributes below are filled by classes that inherit Obfs3Transport.
self.send_keytype = None
self.recv_keytype = None
self.send_magic_const = None
self.recv_magic_const = None
self.we_are_initiator = None
def circuitConnected(self):
"""
Do the obfs3 handshake:
PUBKEY | WR(PADLEN)
"""
padding_length = random.randint(0, MAX_PADDING/2)
handshake_message = self.dh.get_public() + rand.random_bytes(padding_length)
log.debug("obfs3 handshake: %s queued %d bytes (padding_length: %d) (public key: %s).",
"initiator" if self.we_are_initiator else "responder",
len(handshake_message), padding_length, repr(self.dh.get_public()))
self.circuit.downstream.write(handshake_message)
def receivedUpstream(self, data):
"""
Got data from upstream. We need to obfuscate them and proxy them downstream.
"""
if not self.send_crypto:
log.debug("Got upstream data before doing handshake. Caching.")
self.queued_data += data.read()
return
message = self.send_crypto.crypt(data.read())
log.debug("obfs3 receivedUpstream: Transmitting %d bytes.", len(message))
# Proxy encrypted message.
self.circuit.downstream.write(message)
def receivedDownstream(self, data):
"""
Got data from downstream. We need to de-obfuscate them and
proxy them upstream.
"""
if self.state == ST_WAIT_FOR_KEY: # Looking for the other peer's pubkey
self._read_handshake(data)
if self.state == ST_SEARCHING_MAGIC: # Looking for the magic string
self._scan_for_magic(data)
if self.state == ST_OPEN: # Handshake is done. Just decrypt and read application data.
log.debug("obfs3 receivedDownstream: Processing %d bytes of application data." %
len(data))
self.circuit.upstream.write(self.recv_crypto.crypt(data.read()))
def _read_handshake(self, data):
"""
Read handshake message, parse the other peer's public key and
set up our crypto.
"""
log_prefix = "obfs3:_read_handshake()"
if len(data) < PUBKEY_LEN:
log.debug("%s: Not enough bytes for key (%d)." % (log_prefix, len(data)))
return
log.debug("%s: Got %d bytes of handshake data (waiting for key)." % (log_prefix, len(data)))
# Get the public key from the handshake message, do the DH and
# get the shared secret.
other_pubkey = data.read(PUBKEY_LEN)
try:
self.shared_secret = self.dh.get_secret(other_pubkey)
except ValueError:
raise base.PluggableTransportError("obfs3: Corrupted public key '%s'" % repr(other_pubkey))
log.debug("Got public key: %s.\nGot shared secret: %s" %
(repr(other_pubkey), repr(self.shared_secret)))
# Set up our crypto.
self.send_crypto = self._derive_crypto(self.send_keytype)
self.recv_crypto = self._derive_crypto(self.recv_keytype)
self.other_magic_value = hmac_sha256.hmac_sha256_digest(self.shared_secret,
self.recv_magic_const)
# Send our magic value to the remote end and append the queued outgoing data.
# Padding is prepended so that the server does not just send the 32-byte magic
# in a single TCP segment.
padding_length = random.randint(0, MAX_PADDING/2)
magic = hmac_sha256.hmac_sha256_digest(self.shared_secret, self.send_magic_const)
message = rand.random_bytes(padding_length) + magic + self.send_crypto.crypt(self.queued_data)
self.queued_data = ''
log.debug("%s: Transmitting %d bytes (with magic)." % (log_prefix, len(message)))
self.circuit.downstream.write(message)
self.state = ST_SEARCHING_MAGIC
def _scan_for_magic(self, data):
"""
Scan 'data' for the magic string. If found, drain it and all
the padding before it. Then open the connection.
"""
log_prefix = "obfs3:_scan_for_magic()"
log.debug("%s: Searching for magic." % log_prefix)
assert(self.other_magic_value)
chunk = data.peek()
index = chunk.find(self.other_magic_value)
if index < 0:
if (len(data) > MAX_PADDING+HASHLEN):
raise base.PluggableTransportError("obfs3: Too much padding (%d)!" % len(data))
log.debug("%s: Did not find magic this time (%d)." % (log_prefix, len(data)))
return
index += len(self.other_magic_value)
log.debug("%s: Found magic. Draining %d bytes." % (log_prefix, index))
data.drain(index)
self.state = ST_OPEN
def _derive_crypto(self, pad_string):
"""
Derive and return an obfs3 key using the pad string in 'pad_string'.
"""
secret = hmac_sha256.hmac_sha256_digest(self.shared_secret, pad_string)
return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])
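# Illustrative sketch (not part of the original module): the whole obfs3 key
# schedule is HMAC-SHA256 over the UniformDH shared secret with role-specific
# pad strings, exactly as _derive_crypto() and _read_handshake() use it above.
# The all-zero secret below is only a stand-in for a real shared secret.
def _key_schedule_sketch(shared_secret='\x00' * 32):
    init_key = hmac_sha256.hmac_sha256_digest(shared_secret,
                                              "Initiator obfuscated data")
    init_magic = hmac_sha256.hmac_sha256_digest(shared_secret,
                                                "Initiator magic")
    # The first KEYLEN bytes key the AES-CTR cipher; the rest seed its counter.
    return init_key[:KEYLEN], init_key[KEYLEN:], init_magic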
class Obfs3Client(Obfs3Transport):
"""
Obfs3Client is a client for the obfs3 protocol.
The client and server differ in terms of their padding strings.
"""
def __init__(self):
Obfs3Transport.__init__(self)
self.send_keytype = "Initiator obfuscated data"
self.recv_keytype = "Responder obfuscated data"
self.send_magic_const = "Initiator magic"
self.recv_magic_const = "Responder magic"
self.we_are_initiator = True
class Obfs3Server(Obfs3Transport):
"""
Obfs3Server is a server for the obfs3 protocol.
The client and server differ in terms of their padding strings.
"""
def __init__(self):
Obfs3Transport.__init__(self)
self.send_keytype = "Responder obfuscated data"
self.recv_keytype = "Initiator obfuscated data"
self.send_magic_const = "Responder magic"
self.recv_magic_const = "Initiator magic"
self.we_are_initiator = False
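# Illustrative sketch (not part of the original module): the two roles are
# mirror images of each other, so whatever one side sends the other expects.
def _role_symmetry_sketch():
    client, server = Obfs3Client(), Obfs3Server()
    assert client.send_keytype == server.recv_keytype
    assert client.recv_keytype == server.send_keytype
    assert client.send_magic_const == server.recv_magic_const
    assert client.we_are_initiator and not server.we_are_initiator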
|
|
"""
factor.py
"""
from operator import attrgetter
from numbers import Number
from numpy import float64, inf
from toolz import curry
from zipline.errors import (
UnknownRankMethod,
UnsupportedDataType,
)
from zipline.lib.rank import masked_rankdata_2d
from zipline.pipeline.mixins import (
CustomTermMixin,
PositiveWindowLengthMixin,
SingleInputMixin,
)
from zipline.pipeline.term import CompositeTerm, NotSpecified
from zipline.pipeline.expression import (
BadBinaryOperator,
COMPARISONS,
is_comparison,
MATH_BINOPS,
method_name_for_op,
NumericalExpression,
NUMEXPR_MATH_FUNCS,
UNARY_OPS,
unary_op_name,
)
from zipline.pipeline.filters import (
NumExprFilter,
PercentileFilter,
)
from zipline.utils.control_flow import nullctx
from zipline.utils.numpy_utils import (
bool_dtype,
datetime64ns_dtype,
float64_dtype,
)
from zipline.utils.preprocess import preprocess
_RANK_METHODS = frozenset(['average', 'min', 'max', 'dense', 'ordinal'])
def numbers_to_float64(func, argname, argvalue):
"""
Preprocessor for converting numerical inputs into floats.
This is used in the binary operator constructors for Factor so that
`2 + Factor()` has the same behavior as `2.0 + Factor()`.
"""
if isinstance(argvalue, Number):
return float64(argvalue)
return argvalue
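# Illustrative sketch (not in the original source): applied via
# @preprocess(other=numbers_to_float64), integer and float literals reach the
# wrapped operator already coerced to float64, while non-numeric arguments
# (e.g. another Factor) pass through untouched:
#
#     numbers_to_float64(None, 'other', 2)             # -> float64(2.0)
#     numbers_to_float64(None, 'other', other_factor)  # -> other_factor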
@curry
def set_attribute(name, value):
"""
Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Usage
-----
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo'
"""
def decorator(f):
setattr(f, name, value)
return f
return decorator
# Decorators for setting the __name__ and __doc__ properties of a decorated
# function.
# Example:
with_name = set_attribute('__name__')
with_doc = set_attribute('__doc__')
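# Illustrative sketch of how the operator factories below apply them:
#
#     @with_doc("Binary Operator: '+'")
#     @with_name('__add__')
#     def binary_operator(self, other):
#         ...
#
#     # binary_operator.__name__ == '__add__'
#     # binary_operator.__doc__  == "Binary Operator: '+'"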
def binop_return_type(op):
if is_comparison(op):
return NumExprFilter
else:
return NumExprFactor
def binop_return_dtype(op, left, right):
"""
Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol, (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`.
"""
if is_comparison(op):
if left != right:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Comparisons are only supported between Factors of equal "
"dtypes.".format(left=left, op=op, right=right)
)
return bool_dtype
elif left != float64_dtype or right != float64_dtype:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Arithmetic operators are only supported on Factors of "
"dtype 'float64'.".format(
left=left.name,
op=op,
right=right.name,
)
)
return float64_dtype
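# Worked examples (illustrative, not in the original source):
#
#     binop_return_dtype('>', float64_dtype, float64_dtype)       # bool_dtype
#     binop_return_dtype('+', float64_dtype, float64_dtype)       # float64_dtype
#     binop_return_dtype('+', datetime64ns_dtype, float64_dtype)  # raises TypeError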
def binary_operator(op):
"""
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__.
"""
# When combining a Factor with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted implementation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
@preprocess(other=numbers_to_float64)
@with_doc("Binary Operator: '%s'" % op)
@with_name(method_name_for_op(op))
def binary_operator(self, other):
# This can't be hoisted up a scope because the types returned by
# binop_return_type aren't defined when the top-level function is
# invoked in the class body of Factor.
return_type = binop_return_type(op)
if isinstance(self, NumExprFactor):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return return_type(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, NumExprFactor):
# NumericalExpression overrides ops to correctly handle merging of
# inputs. Look up and call the appropriate reflected operator with
# ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Factor):
if self is other:
return return_type(
"x_0 {op} x_0".format(op=op),
(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
return return_type(
"x_0 {op} x_1".format(op=op),
(self, other),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, Number):
return return_type(
"x_0 {op} ({constant})".format(op=op, constant=other),
binds=(self,),
# Interpret numeric literals as floats.
dtype=binop_return_dtype(op, self.dtype, other.dtype)
)
raise BadBinaryOperator(op, self, other)
return binary_operator
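# Usage sketch for the generated methods (illustrative; `SomeFactor` and
# `OtherFactor` stand for hypothetical Factor subclasses with float64 dtype):
#
#     f, g = SomeFactor(), OtherFactor()
#     f + 2      # NumExprFactor("x_0 + (2.0)", binds=(f,))
#     f / g      # NumExprFactor("x_0 / x_1", binds=(f, g))
#     f > g      # NumExprFilter("x_0 > x_1", binds=(f, g))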
def reflected_binary_operator(op):
"""
Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__.
"""
assert not is_comparison(op)
@preprocess(other=numbers_to_float64)
@with_name(method_name_for_op(op, commute=True))
def reflected_binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other
)
return NumExprFactor(
"({left}) {op} ({right})".format(
left=other_expr,
right=self_expr,
op=op,
),
new_inputs,
dtype=binop_return_dtype(op, other.dtype, self.dtype)
)
# Only have to handle the numeric case because in all other valid cases
# the corresponding left-binding method will be called.
elif isinstance(other, Number):
return NumExprFactor(
"{constant} {op} x_0".format(op=op, constant=other),
binds=(self,),
dtype=binop_return_dtype(op, other.dtype, self.dtype),
)
raise BadBinaryOperator(op, other, self)
return reflected_binary_operator
def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator
def function_application(func):
"""
Factory function for producing function application methods for Factor
subclasses.
"""
if func not in NUMEXPR_MATH_FUNCS:
raise ValueError("Unsupported mathematical function '%s'" % func)
@with_name(func)
def mathfunc(self):
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{func}({expr})".format(func=func, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{func}(x_0)".format(func=func),
(self,),
dtype=float64_dtype,
)
return mathfunc
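# Usage sketch (illustrative; `f` is any float64 Factor):
#
#     f.log()     # NumExprFactor("log(x_0)", binds=(f,))
#     f.sqrt()    # NumExprFactor("sqrt(x_0)", binds=(f,))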
FACTOR_DTYPES = frozenset([datetime64ns_dtype, float64_dtype])
class Factor(CompositeTerm):
"""
Pipeline API expression producing numerically-valued outputs.
"""
# Dynamically add functions for creating NumExprFactor/NumExprFilter
# instances.
clsdict = locals()
clsdict.update(
{
method_name_for_op(op): binary_operator(op)
# Don't override __eq__ because it breaks comparisons on tuples of
# Factors.
for op in MATH_BINOPS.union(COMPARISONS - {'=='})
}
)
clsdict.update(
{
method_name_for_op(op, commute=True): reflected_binary_operator(op)
for op in MATH_BINOPS
}
)
clsdict.update(
{
unary_op_name(op): unary_operator(op)
for op in UNARY_OPS
}
)
clsdict.update(
{
funcname: function_application(funcname)
for funcname in NUMEXPR_MATH_FUNCS
}
)
__truediv__ = clsdict['__div__']
__rtruediv__ = clsdict['__rdiv__']
eq = binary_operator('==')
def _validate(self):
# Do superclass validation first so that `NotSpecified` dtypes get
# handled.
retval = super(Factor, self)._validate()
if self.dtype not in FACTOR_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype
)
return retval
def rank(self, method='ordinal', ascending=True, mask=NotSpecified):
"""
Construct a new Factor representing the sorted rank of each column
within each row.
Parameters
----------
method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for
each ranking method. Default is 'ordinal'.
ascending : bool, optional
Whether to return sorted rank in ascending or descending order.
Default is True.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, ranks are computed ignoring any asset/date
pairs for which `mask` produces a value of False.
Returns
-------
ranks : zipline.pipeline.factors.Rank
A new factor that will compute the ranking of the data produced by
`self`.
Notes
-----
The default value for `method` is different from the default for
`scipy.stats.rankdata`. See that function's documentation for a full
description of the valid inputs to `method`.
Missing or non-existent data on a given day will cause an asset to be
given a rank of NaN for that day.
See Also
--------
scipy.stats.rankdata
zipline.lib.rank.masked_rankdata_2d
zipline.pipeline.factors.factor.Rank
"""
return Rank(self, method=method, ascending=ascending, mask=mask)
def top(self, N, mask=NotSpecified):
"""
Construct a Filter matching the top N asset values of self each day.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, top values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
Returns
-------
filter : zipline.pipeline.filters.Filter
"""
return self.rank(ascending=False, mask=mask) <= N
def bottom(self, N, mask=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
Returns
-------
        filter : zipline.pipeline.filters.Filter
"""
return self.rank(ascending=True, mask=mask) <= N
def percentile_between(self,
min_percentile,
max_percentile,
mask=NotSpecified):
"""
Construct a new Filter representing entries from the output of this
Factor that fall within the percentile range defined by min_percentile
and max_percentile.
Parameters
----------
min_percentile : float [0.0, 100.0]
Return True for assets falling above this percentile in the data.
max_percentile : float [0.0, 100.0]
Return True for assets falling below this percentile in the data.
mask : zipline.pipeline.Filter, optional
            A Filter representing assets to consider when computing percentile
            thresholds. If mask is supplied, percentile cutoffs are computed
each day using only assets for which `mask` returns True, and
assets not passing `mask` will produce False in the output of this
filter as well.
Returns
-------
out : zipline.pipeline.filters.PercentileFilter
A new filter that will compute the specified percentile-range mask.
See Also
--------
zipline.pipeline.filters.filter.PercentileFilter
"""
return PercentileFilter(
self,
min_percentile=min_percentile,
max_percentile=max_percentile,
mask=mask,
)
def isnan(self):
"""
A Filter producing True for all values where this Factor is NaN.
Returns
-------
nanfilter : zipline.pipeline.filters.Filter
"""
return self != self
def notnan(self):
"""
A Filter producing True for values where this Factor is not NaN.
Returns
-------
nanfilter : zipline.pipeline.filters.Filter
"""
return ~self.isnan()
def isfinite(self):
"""
A Filter producing True for values where this Factor is anything but
NaN, inf, or -inf.
"""
return (-inf < self) & (self < inf)
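# Usage sketch for the Filter-producing helpers above (illustrative;
# `my_factor` stands for any float64 Factor defined elsewhere):
#
#     ranks    = my_factor.rank(method='dense', ascending=False)
#     leaders  = my_factor.top(50, mask=my_factor.notnan())
#     mid_band = my_factor.percentile_between(25.0, 75.0)
#     finite   = my_factor.isfinite()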
class NumExprFactor(NumericalExpression, Factor):
"""
Factor computed from a numexpr expression.
Parameters
----------
expr : string
A string suitable for passing to numexpr. All variables in 'expr'
should be of the form "x_i", where i is the index of the corresponding
factor input in 'binds'.
binds : tuple
A tuple of factors to use as inputs.
Notes
-----
NumExprFactors are constructed by numerical operators like `+` and `-`.
Users should rarely need to construct a NumExprFactor directly.
"""
pass
class Rank(SingleInputMixin, Factor):
"""
A Factor representing the row-wise rank data of another Factor.
Parameters
----------
factor : zipline.pipeline.factors.Factor
The factor on which to compute ranks.
method : str, {'average', 'min', 'max', 'dense', 'ordinal'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for each
ranking method.
See Also
--------
scipy.stats.rankdata : Underlying ranking algorithm.
zipline.factors.Factor.rank : Method-style interface to same functionality.
Notes
-----
Most users should call Factor.rank rather than directly construct an
instance of this class.
"""
window_length = 0
dtype = float64_dtype
def __new__(cls, factor, method, ascending, mask):
return super(Rank, cls).__new__(
cls,
inputs=(factor,),
method=method,
ascending=ascending,
mask=mask,
)
def _init(self, method, ascending, *args, **kwargs):
self._method = method
self._ascending = ascending
return super(Rank, self)._init(*args, **kwargs)
@classmethod
def static_identity(cls, method, ascending, *args, **kwargs):
return (
super(Rank, cls).static_identity(*args, **kwargs),
method,
ascending,
)
def _validate(self):
"""
Verify that the stored rank method is valid.
"""
if self._method not in _RANK_METHODS:
raise UnknownRankMethod(
method=self._method,
choices=set(_RANK_METHODS),
)
return super(Rank, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a like-shaped array of per-row
ranks.
"""
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
)
def __repr__(self):
return "{type}({input_}, method='{method}', mask={mask})".format(
type=type(self).__name__,
input_=self.inputs[0],
method=self._method,
mask=self.mask,
)
class CustomFactor(PositiveWindowLengthMixin, CustomTermMixin, Factor):
'''
Base class for user-defined Factors.
Parameters
----------
inputs : iterable, optional
An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),
describing the data to load and pass to `self.compute`. If this
        argument is not passed to the CustomFactor constructor, we look for a
class-level attribute named `inputs`.
window_length : int, optional
Number of rows to pass for each input. If this argument is not passed
to the CustomFactor constructor, we look for a class-level attribute
named `window_length`.
Notes
-----
Users implementing their own Factors should subclass CustomFactor and
implement a method named `compute` with the following signature:
.. code-block:: python
def compute(self, today, assets, out, *inputs):
...
On each simulation date, ``compute`` will be called with the current date,
an array of sids, an output array, and an input array for each expression
passed as inputs to the CustomFactor constructor.
The specific types of the values passed to `compute` are as follows::
today : np.datetime64[ns]
Row label for the last row of all arrays passed as `inputs`.
assets : np.array[int64, ndim=1]
            Column labels for `out` and `inputs`.
out : np.array[self.dtype, ndim=1]
Output array of the same shape as `assets`. `compute` should write
its desired return values into `out`.
*inputs : tuple of np.array
Raw data arrays corresponding to the values of `self.inputs`.
``compute`` functions should expect to be passed NaN values for dates on
which no data was available for an asset. This may include dates on which
an asset did not yet exist.
For example, if a CustomFactor requires 10 rows of close price data, and
asset A started trading on Monday June 2nd, 2014, then on Tuesday, June
3rd, 2014, the column of input data for asset A will have 9 leading NaNs
for the preceding days on which data was not yet available.
Examples
--------
A CustomFactor with pre-declared defaults:
.. code-block:: python
class TenDayRange(CustomFactor):
"""
Computes the difference between the highest high in the last 10
days and the lowest low.
Pre-declares high and low as default inputs and `window_length` as
10.
"""
inputs = [USEquityPricing.high, USEquityPricing.low]
window_length = 10
def compute(self, today, assets, out, highs, lows):
from numpy import nanmin, nanmax
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
out[:] = highest_highs - lowest_lows
# Doesn't require passing inputs or window_length because they're
# pre-declared as defaults for the TenDayRange class.
ten_day_range = TenDayRange()
A CustomFactor without defaults:
.. code-block:: python
class MedianValue(CustomFactor):
"""
Computes the median value of an arbitrary single input over an
            arbitrary window.
Does not declare any defaults, so values for `window_length` and
`inputs` must be passed explicitly on every construction.
"""
def compute(self, today, assets, out, data):
from numpy import nanmedian
                out[:] = nanmedian(data, axis=0)
# Values for `inputs` and `window_length` must be passed explicitly to
# MedianValue.
median_close10 = MedianValue([USEquityPricing.close], window_length=10)
median_low15 = MedianValue([USEquityPricing.low], window_length=15)
'''
dtype = float64_dtype
ctx = nullctx()
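# End-to-end usage sketch (illustrative; reuses the TenDayRange example from
# the CustomFactor docstring above and assumes zipline.pipeline.Pipeline):
#
#     ten_day_range = TenDayRange()
#     pipe = Pipeline(
#         columns={
#             'range': ten_day_range,
#             'range_rank': ten_day_range.rank(),
#         },
#         screen=ten_day_range.top(100),
#     )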
|