repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
aam-at/tensorflow | tensorflow/python/kernel_tests/xent_op_test.py | 5 | 15784 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class XentTest(test.TestCase):
def _npXent(self, features, labels, dim=-1):
if dim == -1:
dim = len(features.shape) - 1
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
e = np.exp(
features - np.reshape(np.amax(features, axis=dim), one_only_on_dim))
probs = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
bp = (probs - labels)
l = -np.sum(labels * np.log(probs + 1.0e-20), axis=dim)
return l, bp
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
# are needed.
def _testXent(self,
np_features,
np_labels,
use_gpu=False,
with_placeholders=False):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.cached_session(use_gpu=use_gpu) as sess:
if with_placeholders:
features_placeholder = array_ops.placeholder(np_features.dtype)
labels_placeholder = array_ops.placeholder(np_labels.dtype)
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
labels=labels_placeholder, features=features_placeholder)
tf_loss, tf_backprop = sess.run([loss, backprop],
feed_dict={
labels_placeholder: np_labels,
features_placeholder: np_features
})
else:
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss, half_rtol=1e-2)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def _testXentWrapper(self, np_features, np_labels, dim=-1, use_gpu=False):
np_loss, _ = self._npXent(np_features, np_labels, dim=dim)
with self.cached_session(use_gpu=use_gpu) as sess:
loss = nn_ops.softmax_cross_entropy_with_logits(
labels=np_labels, logits=np_features, dim=dim)
tf_loss = self.evaluate(loss)
print("np_loss:", np_loss)
print("tf_loss:", tf_loss)
self.assertAllCloseAccordingToType(np_loss, tf_loss)
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
# are needed.
def _testAll(self, features, labels, with_placeholders=False):
self._testXent(
features, labels, use_gpu=False, with_placeholders=with_placeholders)
self._testXent(
features, labels, use_gpu=True, with_placeholders=with_placeholders)
def _testSingleClass(self, use_gpu=False):
for dtype in np.float16, np.float32:
with self.cached_session(use_gpu=use_gpu) as sess:
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(dtype),
np.array([[-1.], [0.], [1.]]).astype(dtype))
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[2.0], [1.0], [0.0]], tf_backprop)
def testSingleClass(self):
self._testSingleClass(True)
self._testSingleClass(False)
@test_util.run_deprecated_v1
def testRankTooLarge(self):
for dtype in np.float16, np.float32:
np_features = np.array([[[1., 1., 1., 1.]], [[1., 2., 3.,
4.]]]).astype(dtype)
np_labels = np.array([[[0., 0., 0., 1.]], [[0., .5, .5,
0.]]]).astype(dtype)
self.assertRaisesRegex(ValueError, "rank 2, but is rank 3",
gen_nn_ops.softmax_cross_entropy_with_logits,
np_features, np_labels)
def testNpXent(self):
# We create 2 batches of logits for testing.
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
# batch 1 has a bit of difference: 1, 2, 3, 4, with soft targets (1, 2).
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [[0., 0., 0., 1.], [0., .5, .5, 0.]]
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
# With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
# The loss for this batch is -log(0.25) = 1.386
#
# For batch 1, we have:
# exp(0) = 1
# exp(1) = 2.718
# exp(2) = 7.389
# exp(3) = 20.085
# SUM = 31.192
# So we have as probabilities:
# exp(0) / SUM = 0.032
# exp(1) / SUM = 0.087
# exp(2) / SUM = 0.237
# exp(3) / SUM = 0.644
# With a soft target (1, 2), the backprop is
# [0.032, 0.087 - 0.5 = -0.413, 0.237 - 0.5 = -0.263, 0.644]
# The loss for this batch is 0.5 * -log(0.087) + 0.5 * -log(0.237) = 1.9401,
# so the loss over the two batches is [1.3862, 1.9401].
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, -0.75], [0.0321, -0.4129, -0.2632,
0.6439]]),
np_backprop,
rtol=1.e-3,
atol=1.e-3)
self.assertAllClose(
np.array([1.3862, 1.9401]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeBroadcast(self):
np_f = np.array([[1., 2., 3., 4.],
[1., 2., 3., 4.]]).astype(np.float32)
np_l = np.array([[0., 0., 0., 1.],
[0., .5, .5, 0.]]).astype(np.float32)
np_loss, np_backprop = self._npXent(np_f, np_l)
tf_f = constant_op.constant(
np.array([[1., 2., 3., 4.]]).astype(np.float32))
tf_l = constant_op.constant(
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32))
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu) as sess:
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
tf_f, tf_l)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
# are needed.
@test_util.run_deprecated_v1
def testFeatureBroadcast(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([[0., 0., 0., 1.]]).astype(np.float16),
with_placeholders=True)
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([[0.], [2.]]).astype(np.float16),
with_placeholders=True)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
with self.cached_session():
with self.assertRaises(ValueError):
gen_nn_ops.softmax_cross_entropy_with_logits(
[[0., 1.], [2., 3.]], [[0., 1., 0.], [1., 0., 0.]])
@test_util.run_deprecated_v1
def testNotMatrix(self):
with self.cached_session():
with self.assertRaises(ValueError):
gen_nn_ops.softmax_cross_entropy_with_logits([0., 1., 2., 3.],
[0., 1., 0., 1.])
def testHalf(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float16))
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32))
def testDouble(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float64))
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session() as sess:
l = constant_op.constant(
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5],
shape=[3, 4],
dtype=dtypes.float64,
name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
x = nn_ops.softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
# Check that no extra computation is performed. When only the first
# derivative is requested, the second derivative must not be computed,
# so the graph should contain no `BatchMatMul` op.
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
self.assertNotIn("BatchMatMul", op_names)
self.assertNotIn("BatchMatMulV2", op_names)
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)
@test_util.run_deprecated_v1
def testGradientLabelWithV2(self):
with self.cached_session():
l = constant_op.constant(
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5],
shape=[3, 4],
dtype=dtypes.float64,
name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
x = nn_ops.softmax_cross_entropy_with_logits_v2(
labels=l, logits=f, name="xent")
err = gradient_checker.compute_gradient_error(l, [3, 4], x, [3])
self.assertLess(err, 5e-8)
@test_util.run_deprecated_v1
def testSecondGradient(self):
with self.cached_session() as sess:
l = constant_op.constant(
[
0.0, 0.0, 1.0 / 3, 0.0, 1.0 / 3, 0.0, 0.0, 0.0, 0.0, 0.5 / 3, 0.0,
0.5 / 3
],
shape=[12],
dtype=dtypes.float64,
name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[12],
dtype=dtypes.float64,
name="f")
x = nn_ops.softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
loss = math_ops.reduce_sum(x)
gradients = gradients_impl.gradients(loss, [f])[0]
err = gradient_checker.compute_gradient_error(f, [12], gradients, [12])
# Check that the second derivative is calculated: the cross-entropy gradient
# implementation computes it via a `BatchMatMul`/`BatchMatMulV2` op, so that
# op must appear in the graph.
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
self.assertIn("BatchMatMulV2", op_names)
print("cross entropy hessian err = ", err)
self.assertLess(err, 5e-8)
def testWrapper(self):
features = np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(
np.float32)
labels = np.array([[[0., 0., 0., 1.], [0., 1., 0., 0.]],
[[0., 0.5, 0.5, 0.], [0.5, 0.5, 0., 0.]],
[[0., 1., 0., 0.], [0., 0., 1., 0.]]]).astype(
np.float32)
self._testXentWrapper(features, labels, dim=0, use_gpu=False)
self._testXentWrapper(features, labels, dim=0, use_gpu=True)
self._testXentWrapper(features, labels, dim=1, use_gpu=False)
self._testXentWrapper(features, labels, dim=1, use_gpu=True)
self._testXentWrapper(features, labels, dim=-1, use_gpu=False)
self._testXentWrapper(features, labels, dim=-1, use_gpu=True)
def testZeroDimension(self):
features = np.zeros([0, 2, 4]).astype(np.float32)
labels = np.zeros([0, 2, 4]).astype(np.float32)
np_loss, _ = self._npXent(features, labels)
with self.session(use_gpu=True) as sess:
loss = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=features)
tf_loss = self.evaluate(loss)
self.assertAllEqual(np_loss, tf_loss)
class XentBenchmark(test.Benchmark):
def benchmarkZeroDimension(self):
for (m, n, p, use_gpu) in itertools.product(
[128],
[10, 100, 1000, 10000, 100000],
[0.001, 0.01, 0.5, 0.99, 1.0],
[False]):
k = int(p * n)
if k == 0:
continue
name = "zero_dimension_m_%d_n_%d_k_%g_use_gpu_%s" % (m, n, k, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
labels = array_ops.zeros([0, 2, 4], dtype=dtypes.float32)
logits = array_ops.zeros([0, 2, 4], dtype=dtypes.float32)
op = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
with session.Session() as sess:
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
throughput = gb_processed_input / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
def benchmarkSingleClass(self):
for (m, n, p, use_gpu) in itertools.product(
[128],
[10, 100, 1000, 10000, 100000],
[0.001, 0.01, 0.5, 0.99, 1.0],
[False]):
k = int(p * n)
if k == 0:
continue
name = "single_class_m_%d_n_%d_k_%g_use_gpu_%s" % (m, n, k, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
labels = constant_op.constant([[1.], [-1.], [0.]],
dtype=dtypes.float32)
logits = constant_op.constant([[-1.], [0.], [1.]],
dtype=dtypes.float32)
op = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
with session.Session() as sess:
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
throughput = gb_processed_input / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
if __name__ == "__main__":
test.main()
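# --- Illustrative sketch (not part of the original test file) ---
# A minimal standalone reproduction of the reference math walked through in
# testNpXent above, assuming only NumPy; the numbers mirror that comment
# (logits [1, 2, 3, 4] with soft target [0, .5, .5, 0]).
def _reference_xent_example():
  logits = np.array([1., 2., 3., 4.])
  labels = np.array([0., .5, .5, 0.])
  # Numerically stable softmax: subtract the max before exponentiating.
  e = np.exp(logits - np.max(logits))
  probs = e / np.sum(e)                    # ~[0.032, 0.087, 0.237, 0.644]
  backprop = probs - labels                # ~[0.032, -0.413, -0.263, 0.644]
  loss = -np.sum(labels * np.log(probs))   # ~1.9401
  return loss, backprop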
| apache-2.0 |
alvaroaleman/ansible | lib/ansible/plugins/action/net_config.py | 6 | 4154 |
#
# Copyright 2015 Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.network import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in result.keys():
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block:"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
bensternthal/bedrock | bedrock/mozorg/cron.py | 25 | 1085 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf import settings
from django.core.cache import cache
import cronjobs
import feedparser
from bedrock.mozorg.models import TwitterCache
from bedrock.mozorg.util import get_tweets
@cronjobs.register
def update_feeds():
for name, url in settings.FEEDS.items():
feed_info = feedparser.parse(url)
# Cache for a year (the cron job will refresh it on a set interval
# regardless).
cache.set('feeds-%s' % name, feed_info, 60 * 60 * 24 * 365)
@cronjobs.register
def update_tweets():
for account in settings.TWITTER_ACCOUNTS:
tweets = get_tweets(account)
if tweets:
account_cache, created = TwitterCache.objects.get_or_create(
account=account, defaults={'tweets': tweets})
if not created:
account_cache.tweets = tweets
account_cache.save()
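# --- Illustrative sketch (not part of the original cron module) ---
# A hedged example of how a consumer might read the feed data written by
# update_feeds() above; the feed name 'blog' is hypothetical and would have
# to exist in settings.FEEDS.
def get_cached_feed(name='blog'):
    # Returns the feedparser result stored by the cron job, or None if the
    # job has not populated the cache yet.
    return cache.get('feeds-%s' % name)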
| mpl-2.0 |
marook/tagfs | src/test/tagfs_test_small/test_freebase_support_query.py | 2 | 1239 |
#
# Copyright 2012 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import unittest
import tagfs.freebase_support as freebase_support
class WhenQueryWithOneFilerAndOneSelector(unittest.TestCase):
def setUp(self):
super(WhenQueryWithOneFilerAndOneSelector, self).setUp()
self.query = freebase_support.Query({'filter': 'filterValue', 'selector': None, })
def testThenSelectedKeysIsSelector(self):
self.assertEqual(list(self.query.selectedKeys), ['selector',])
def testThenQueryStringIs(self):
self.assertEqual(self.query.queryString, '{"filter":"filterValue","selector":[]}')
| gpl-3.0 |
co63oc/p2pool | nattraverso/pynupnp/upnpxml.py | 288 | 3026 |
"""
This module parses an UPnP device's XML definition into an object.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{[email protected]<mailto:[email protected]>}
@version: 0.1.0
"""
__revision__ = "$id"
from xml.dom import minidom
import logging
# Allowed UPnP services to use when mapping ports/external addresses
WANSERVICES = ['urn:schemas-upnp-org:service:WANIPConnection:1',
'urn:schemas-upnp-org:service:WANPPPConnection:1']
class UPnPXml:
"""
This objects parses the XML definition, and stores the useful
results in attributes.
The device infos dictionary may contain the following keys:
- friendlyname: A friendly name to call the device.
- manufacturer: A manufacturer name for the device.
Here are the different attributes:
- deviceinfos: A dictionary of device infos as defined above.
- controlurl: The control url, this is the url to use when sending SOAP
requests to the device, relative to the base url.
- wanservice: The WAN service to be used, one of the L{WANSERVICES}
- urlbase: The base url to use when talking in SOAP to the device.
The full url to use is obtained by urljoin(urlbase, controlurl)
"""
def __init__(self, xml):
"""
Parse the given XML string for UPnP infos. This creates the attributes
when they are found, or None if no value was found.
@param xml: an XML string to parse
"""
logging.debug("Got UPNP Xml description:\n%s", xml)
doc = minidom.parseString(xml)
# Fetch various device info
self.deviceinfos = {}
try:
attributes = {
'friendlyname':'friendlyName',
'manufacturer' : 'manufacturer'
}
device = doc.getElementsByTagName('device')[0]
for name, tag in attributes.iteritems():
try:
self.deviceinfos[name] = device.getElementsByTagName(
tag)[0].firstChild.data.encode('utf-8')
except:
pass
except:
pass
# Fetch device control url
self.controlurl = None
self.wanservice = None
for service in doc.getElementsByTagName('service'):
try:
stype = service.getElementsByTagName(
'serviceType')[0].firstChild.data.encode('utf-8')
if stype in WANSERVICES:
self.controlurl = service.getElementsByTagName(
'controlURL')[0].firstChild.data.encode('utf-8')
self.wanservice = stype
break
except:
pass
# Find base url
self.urlbase = None
try:
self.urlbase = doc.getElementsByTagName(
'URLBase')[0].firstChild.data.encode('utf-8')
except:
pass
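# --- Illustrative sketch (not part of the original module) ---
# A hedged example of combining the parsed attributes into the SOAP endpoint,
# following the class docstring (urljoin(urlbase, controlurl)). `device_xml`
# stands in for an XML description fetched from the gateway.
def example_control_url(device_xml):
    from urlparse import urljoin  # Python 2 stdlib, matching this module
    parsed = UPnPXml(device_xml)
    if parsed.urlbase and parsed.controlurl:
        return urljoin(parsed.urlbase, parsed.controlurl)
    return None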
| gpl-3.0 |
fredericlepied/ansible | lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py | 72 | 7106 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = """
module: ec2_scaling_policy
short_description: Create or delete AWS scaling policies for Autoscaling groups
description:
- Can create or delete scaling policies for autoscaling groups
- Referenced autoscaling groups must already exist
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the policy
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the scaling policy
required: true
asg_name:
description:
- Name of the associated autoscaling group
required: true
adjustment_type:
description:
- The type of change in capacity of the autoscaling group
required: false
choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
scaling_adjustment:
description:
- The amount by which the autoscaling group is adjusted by the policy
required: false
min_adjustment_step:
description:
- Minimum amount of adjustment when policy is triggered
required: false
cooldown:
description:
- The minimum period of time between which autoscaling actions can take place
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- ec2_scaling_policy:
state: present
region: US-XXX
name: "scaledown-policy"
adjustment_type: "ChangeInCapacity"
asg_name: "slave-pool"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 300
'''
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import ScalingPolicy
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_scaling_policy(connection, module):
sp_name = module.params.get('name')
adjustment_type = module.params.get('adjustment_type')
asg_name = module.params.get('asg_name')
scaling_adjustment = module.params.get('scaling_adjustment')
min_adjustment_step = module.params.get('min_adjustment_step')
cooldown = module.params.get('cooldown')
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
if not scalingPolicies:
sp = ScalingPolicy(
name=sp_name,
adjustment_type=adjustment_type,
as_name=asg_name,
scaling_adjustment=scaling_adjustment,
min_adjustment_step=min_adjustment_step,
cooldown=cooldown)
try:
connection.create_scaling_policy(sp)
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
policy = scalingPolicies[0]
changed = False
# min_adjustment_step attribute is only relevant if the adjustment_type
# is set to percentage change in capacity, so it is a special case
if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
changed = True
# set the min adjustment step in case the user decided to change their
# adjustment type to percentage
setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
# check the remaining attributes
for attr in ('adjustment_type','scaling_adjustment','cooldown'):
if getattr(policy, attr) != module.params.get(attr):
changed = True
setattr(policy, attr, module.params.get(attr))
try:
if changed:
connection.create_scaling_policy(policy)
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
module.fail_json(msg=str(e))
def delete_scaling_policy(connection, module):
sp_name = module.params.get('name')
asg_name = module.params.get('asg_name')
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
if scalingPolicies:
try:
connection.delete_policy(sp_name, asg_name)
module.exit_json(changed=True)
except BotoServerError as e:
module.exit_json(changed=False, msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name = dict(required=True, type='str'),
adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']),
asg_name = dict(required=True, type='str'),
scaling_adjustment = dict(type='int'),
min_adjustment_step = dict(type='int'),
cooldown = dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
state = module.params.get('state')
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg = str(e))
if state == 'present':
create_scaling_policy(connection, module)
elif state == 'absent':
delete_scaling_policy(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
ChanChiChoi/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 |
"""
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts increases, the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
tangyiyong/odoo | openerp/addons/base/res/__init__.py | 384 | 1261 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_country
import res_lang
import res_partner
import res_bank
import res_config
import res_currency
import res_font
import res_company
import res_users
import res_request
import res_lang
import ir_property
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bemehow/yum-s3-tools | scripts/find-package.py | 1 | 1355 |
#!/usr/bin/env python
"""
Usage:
find-package.py --repourl=<repo-url> --package=<package-name> [--env|--iam] [--debug] [--filter=<filter>]
Attributes:
--repourl=<repourl> -r Repository URL eg. https://BUCKET_NAME.s3.amazonaws.com/cent6/
--package=<package-name> -p Package name to search for
--filter=<filter> -f Search only this sqlite database [default: primary]
--env Pull credentials from the environment
--iam Use IAM policy (Instance Profile) to obtain credentials
--debug Show more debug info
"""
from yums3tools import S3YumRepo
import docopt
def main():
arguments = docopt.docopt(__doc__)
debug = arguments['--debug']
if debug:
print arguments
#repo_url should follow the format of
#repo_url='https://BUCKET_NAME.s3.amazonaws.com/cent6/'
#hardcoding env auth for now
repo_url = arguments['--repourl']
package = arguments['--package']
# default filter: look only for packages in the 'primary' sqlite database (overridable)
filter = arguments['--filter']
repo = S3YumRepo('env', repo_url, filter, debug)
if package in repo.packages.keys():
print "Package {0} found! Version: {1}".format(package, repo.packages[package])
if __name__ == '__main__':
main()
| apache-2.0 |
tecan/xchat-rt | plugins/scripts/Supybot-0.83.4.1-bitcoinotc-bot/build/lib/supybot/plugins/Lart/plugin.py | 15 | 3812 |
###
# Copyright (c) 2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
class Lart(plugins.ChannelIdDatabasePlugin):
_meRe = re.compile(r'\bme\b', re.I)
_myRe = re.compile(r'\bmy\b', re.I)
def _replaceFirstPerson(self, s, nick):
s = self._meRe.sub(nick, s)
s = self._myRe.sub('%s\'s' % nick, s)
return s
def addValidator(self, irc, text):
if '$who' not in text:
irc.error('Larts must contain $who.', Raise=True)
def lart(self, irc, msg, args, channel, id, text):
"""[<channel>] [<id>] <who|what> [for <reason>]
Uses the Luser Attitude Readjustment Tool on <who|what> (for <reason>,
if given). If <id> is given, uses that specific lart. <channel> is
only necessary if the message isn't sent in the channel itself.
"""
if ' for ' in text:
(target, reason) = map(str.strip, text.split(' for ', 1))
else:
(target, reason) = (text, '')
if id is not None:
try:
lart = self.db.get(channel, id)
except KeyError:
irc.error(format('There is no lart with id #%i.', id))
return
else:
lart = self.db.random(channel)
if not lart:
irc.error(format('There are no larts in my database '
'for %s.', channel))
return
text = lart.text
if ircutils.strEqual(target, irc.nick):
target = msg.nick
reason = self._replaceFirstPerson('trying to dis me', irc.nick)
else:
target = self._replaceFirstPerson(target, msg.nick)
reason = self._replaceFirstPerson(reason, msg.nick)
if target.endswith('.'):
target = target.rstrip('.')
text = text.replace('$who', target)
if reason:
text += ' for ' + reason
if self.registryValue('showIds', channel):
text += format(' (#%i)', lart.id)
irc.reply(text, action=True)
lart = wrap(lart, ['channeldb', optional('id'), 'text'])
Class = Lart
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| gpl-2.0 |
krishnab-datakind/mining-data-acquisition | data_gather/HandlerSetRadius.py | 1 | 1730 |
#!/usr/bin/python
"""
Handler for setting radius on the request.
"""
## MIT License
##
## Copyright (c) 2017, krishna bhogaonker
## Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'krishna bhogaonker'
__copyright__ = 'copyright 2017'
__credits__ = ['krishna bhogaonker']
__license__ = "MIT"
__version__ = '0.1.0'
__maintainer__ = 'krishna bhogaonker'
__email__ = '[email protected]'
__status__ = 'pre-alpha'
from abcHandler import abcHandler
class HandlerSetRadius(abcHandler):
def handle(self):
self.request.set_radius(self.request.settings['radius'])
if self.successor is not None:
self.successor(self.request).handle()
if __name__ == "__main__":
print('Handler to set Radius.')
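# --- Illustrative sketch (not part of the original module) ---
# A toy, self-contained illustration of the chaining used in handle() above:
# each handler works on the shared request and then instantiates its successor
# class with the same request. This stand-in does not use the real abcHandler.
class _ToyLoggingHandler(object):
    def __init__(self, request, successor=None):
        self.request = request
        self.successor = successor

    def handle(self):
        print('toy handler saw radius setting:', self.request.settings['radius'])
        if self.successor is not None:
            self.successor(self.request).handle()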
| mit |
detiber/ansible | lib/ansible/module_utils/database.py | 401 | 5839 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014, Toshio Kuratomi <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class SQLParseError(Exception):
pass
class UnclosedQuoteError(SQLParseError):
pass
# maps a type of identifier to the maximum number of dot levels that are
# allowed to specify that identifier. For example, a database column can be
# specified by up to 4 levels: database.schema.table.column
_PG_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1)
_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
def _find_end_quote(identifier, quote_char):
accumulate = 0
while True:
try:
quote = identifier.index(quote_char)
except ValueError:
raise UnclosedQuoteError
accumulate = accumulate + quote
try:
next_char = identifier[quote+1]
except IndexError:
return accumulate
if next_char == quote_char:
try:
identifier = identifier[quote+2:]
accumulate = accumulate + 2
except IndexError:
raise UnclosedQuoteError
else:
return accumulate
def _identifier_parse(identifier, quote_char):
if not identifier:
raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
already_quoted = False
if identifier.startswith(quote_char):
already_quoted = True
try:
end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
except UnclosedQuoteError:
already_quoted = False
else:
if end_quote < len(identifier) - 1:
if identifier[end_quote+1] == '.':
dot = end_quote + 1
first_identifier = identifier[:dot]
next_identifier = identifier[dot+1:]
further_identifiers = _identifier_parse(next_identifier, quote_char)
further_identifiers.insert(0, first_identifier)
else:
raise SQLParseError('User escaped identifiers must escape extra quotes')
else:
further_identifiers = [identifier]
if not already_quoted:
try:
dot = identifier.index('.')
except ValueError:
identifier = identifier.replace(quote_char, quote_char*2)
identifier = ''.join((quote_char, identifier, quote_char))
further_identifiers = [identifier]
else:
if dot == 0 or dot >= len(identifier) - 1:
identifier = identifier.replace(quote_char, quote_char*2)
identifier = ''.join((quote_char, identifier, quote_char))
further_identifiers = [identifier]
else:
first_identifier = identifier[:dot]
next_identifier = identifier[dot+1:]
further_identifiers = _identifier_parse(next_identifier, quote_char)
first_identifier = first_identifier.replace(quote_char, quote_char*2)
first_identifier = ''.join((quote_char, first_identifier, quote_char))
further_identifiers.insert(0, first_identifier)
return further_identifiers
def pg_quote_identifier(identifier, id_type):
identifier_fragments = _identifier_parse(identifier, quote_char='"')
if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
return '.'.join(identifier_fragments)
def mysql_quote_identifier(identifier, id_type):
identifier_fragments = _identifier_parse(identifier, quote_char='`')
if len(identifier_fragments) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
special_cased_fragments = []
for fragment in identifier_fragments:
if fragment == '`*`':
special_cased_fragments.append('*')
else:
special_cased_fragments.append(fragment)
return '.'.join(special_cased_fragments)
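# --- Illustrative sketch (not part of the original module) ---
# A hedged example of what the quoting helpers above produce; the identifier
# names are made up for illustration.
def _quote_identifier_examples():
    return [
        # Dotted identifiers are split and each fragment is double-quoted.
        pg_quote_identifier('public.my_table', 'table'),   # '"public"."my_table"'
        # MySQL quoting uses backticks, and a bare * fragment stays unquoted.
        mysql_quote_identifier('mydb.*', 'table'),         # '`mydb`.*'
    ]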
| gpl-3.0 |
dongjoon-hyun/tensorflow | tensorflow/python/layers/utils.py | 31 | 8695 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains layer utilies for input validation and format conversion.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import variables
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.framework import smart_cond as smart_module
from tensorflow.python.util import nest
def convert_data_format(data_format, ndim):
if data_format == 'channels_last':
if ndim == 3:
return 'NWC'
elif ndim == 4:
return 'NHWC'
elif ndim == 5:
return 'NDHWC'
else:
raise ValueError('Input rank not supported:', ndim)
elif data_format == 'channels_first':
if ndim == 3:
return 'NCW'
elif ndim == 4:
return 'NCHW'
elif ndim == 5:
return 'NCDHW'
else:
raise ValueError('Input rank not supported:', ndim)
else:
raise ValueError('Invalid data_format:', data_format)
def normalize_tuple(value, n, name):
"""Transforms a single integer or iterable of integers into an integer tuple.
Arguments:
value: The value to validate and convert. Could be an int, or any iterable
of ints.
n: The size of the tuple to be returned.
name: The name of the argument being validated, e.g. "strides" or
"kernel_size". This is only used to format error messages.
Returns:
A tuple of n integers.
Raises:
ValueError: If something else than an int/long or iterable thereof was
passed.
"""
if isinstance(value, int):
return (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value))
if len(value_tuple) != n:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value))
for single_value in value_tuple:
try:
int(single_value)
except (ValueError, TypeError):
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value) + ' '
'including element ' + str(single_value) + ' of type' +
' ' + str(type(single_value)))
return value_tuple
def normalize_data_format(value):
data_format = value.lower()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('The `data_format` argument must be one of '
'"channels_first", "channels_last". Received: ' +
str(value))
return data_format
def normalize_padding(value):
padding = value.lower()
if padding not in {'valid', 'same'}:
raise ValueError('The `padding` argument must be one of "valid", "same". '
'Received: ' + str(padding))
return padding
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
"""Determines output length of a convolution given input length.
Arguments:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
dilation: dilation rate, integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
assert padding in {'same', 'valid', 'full'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding == 'same':
output_length = input_length
elif padding == 'valid':
output_length = input_length - dilated_filter_size + 1
elif padding == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
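# A worked example of the formula above (illustrative numbers, not from the
# original file): with input_length=10, filter_size=3, padding='valid',
# stride=2 and dilation=1, dilated_filter_size = 3, output_length = 10 - 3 + 1
# = 8, and the function returns (8 + 2 - 1) // 2 = 4.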
def conv_input_length(output_length, filter_size, padding, stride):
"""Determines input length of a convolution given output length.
Arguments:
output_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
Returns:
The input length (integer).
"""
if output_length is None:
return None
assert padding in {'same', 'valid', 'full'}
if padding == 'same':
pad = filter_size // 2
elif padding == 'valid':
pad = 0
elif padding == 'full':
pad = filter_size - 1
return (output_length - 1) * stride - 2 * pad + filter_size
def deconv_output_length(input_length, filter_size, padding, stride):
"""Determines output length of a transposed convolution given input length.
Arguments:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
input_length *= stride
if padding == 'valid':
input_length += max(filter_size - stride, 0)
elif padding == 'full':
input_length -= (stride + filter_size - 2)
return input_length
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if isinstance(pred, variables.Variable):
return control_flow_ops.cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name)
return smart_module.smart_cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name)
def constant_value(pred):
"""Return the bool value for `pred`, or None if `pred` had a dynamic value.
Arguments:
pred: A scalar, either a Python bool or a TensorFlow boolean variable
or tensor, or the Python integer 1 or 0.
Returns:
True or False if `pred` has a constant boolean value, None otherwise.
Raises:
TypeError: If `pred` is not a Variable, Tensor or bool, or Python
integer 1 or 0.
"""
# Allow integer booleans.
if isinstance(pred, int):
if pred == 1:
pred = True
elif pred == 0:
pred = False
if isinstance(pred, variables.Variable):
return None
return smart_module.smart_constant_value(pred)
def object_list_uid(object_list):
"""Creates a single string from object ids."""
object_list = nest.flatten(object_list)
return ', '.join([str(abs(id(x))) for x in object_list])
def static_shape(x):
"""Get the static shape of a Tensor, or None if it is unavailable."""
if x is None:
return None
try:
return tuple(x.get_shape().as_list())
except ValueError:
return None
def get_reachable_from_inputs(inputs, targets=None):
"""Returns the set of tensors reachable from `inputs`.
Stops if all targets have been found (target is optional).
Only valid in Symbolic mode, not Eager mode.
Args:
inputs: List of tensors.
targets: List of tensors.
Returns:
A set of tensors reachable from the inputs (includes the inputs themselves).
"""
reachable = set(inputs)
if targets:
targets = set(targets)
queue = inputs[:]
while queue:
x = queue.pop()
outputs = []
try:
consumers = x.consumers()
except AttributeError:
# Case where x is a variable type
consumers = [x.op]
for z in consumers:
consumer_outputs = z.outputs
if consumer_outputs: # May be None
outputs += consumer_outputs
for y in outputs:
if y not in reachable:
reachable.add(y)
queue.insert(0, y)
if targets and targets.issubset(reachable):
return reachable
return reachable
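# --- Illustrative sketch (not part of the original module) ---
# A few hedged examples of the normalization helpers above; the argument
# values are made up.
#   normalize_tuple(2, 2, 'strides')        -> (2, 2)
#   normalize_tuple((1, 2), 2, 'strides')   -> (1, 2)
#   normalize_data_format('Channels_Last')  -> 'channels_last'
#   normalize_padding('SAME')               -> 'same'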
| apache-2.0 |
Adnn/django | django/core/management/commands/dbshell.py | 467 | 1192 |
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
class Command(BaseCommand):
help = ("Runs the command-line client for specified database, or the "
"default database if none is provided.")
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database onto which to '
'open a shell. Defaults to the "default" database.')
def handle(self, **options):
connection = connections[options.get('database')]
try:
connection.client.runshell()
except OSError:
# Note that we're assuming OSError means that the client program
# isn't installed. There's a possibility OSError would be raised
# for some other reason, in which case this error message would be
# inaccurate. Still, this message catches the common case.
raise CommandError('You appear not to have the %r program installed or on your path.' %
connection.client.executable_name)
| bsd-3-clause |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/estimator/python/estimator/baseline_test.py | 9 | 15538 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for baseline.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib.estimator.python.estimator import baseline
from tensorflow.contrib.estimator.python.estimator import head as head_lib
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import optimizer
from tensorflow.python.training import saver
# Names of variables created by model.
BIAS_NAME = 'baseline/bias'
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
rtol = ops.convert_to_tensor(rtol, name='rtol')
return check_ops.assert_less(
rdiff,
rtol,
data=('Condition expected =~ actual did not hold element-wise:'
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
def save_variables_to_ckpt(model_dir):
init_all_op = [variables.global_variables_initializer()]
with tf_session.Session() as sess:
sess.run(init_all_op)
saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def _baseline_estimator_fn(
weight_column=None, label_dimension=1, *args, **kwargs):
"""Returns a BaselineEstimator that uses regression_head."""
return baseline.BaselineEstimator(
head=head_lib.regression_head(
weight_column=weight_column, label_dimension=label_dimension,
# Tests in core (from which this test inherits) test the sum loss.
loss_reduction=losses.Reduction.SUM),
*args, **kwargs)
class BaselineEstimatorEvaluationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_estimator = _baseline_estimator_fn(model_dir=self._model_dir)
eval_metrics = baseline_estimator.evaluate(
input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
baseline_estimator = _baseline_estimator_fn(
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = baseline_estimator.evaluate(input_fn=_input_fn, steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
# Average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
label_dim = 2
with ops.Graph().as_default():
variables.Variable([46.0, 58.0], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_estimator = _baseline_estimator_fn(
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = baseline_estimator.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
# Logit is bias which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaselineEstimatorPredictTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with ops.Graph().as_default():
variables.Variable([.2], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_estimator = _baseline_estimator_fn(model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = baseline_estimator.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
    # score = bias = .2 (a BaselineEstimator ignores the features)
self.assertAllClose([[.2]], predicted_scores)
def testMultiDim(self):
"""Tests predict when all variables are multi-dimenstional."""
batch_size = 2
label_dimension = 3
with ops.Graph().as_default():
variables.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_estimator = _baseline_estimator_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = baseline_estimator.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = bias, shape=[batch_size, label_dimension]
self.assertAllClose([[0.2, 0.4, 0.6], [0.2, 0.4, 0.6]],
predicted_scores)
class BaselineEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_estimator_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaselineEstimatorTrainingTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s:0' % BIAS_NAME
]
def _minimize(loss, global_step=None, var_list=None):
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
label_dimension,
expected_global_step,
expected_bias=None):
shapes = {
name: shape
for (name, shape) in checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(self._model_dir,
ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([label_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
checkpoint_utils.load_variable(self._model_dir,
BIAS_NAME))
def testFromScratch(self):
# Create BaselineRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = self._mock_optimizer(expected_loss=25.)
baseline_estimator = _baseline_estimator_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_estimator.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=num_steps,
expected_bias=[0.])
def testFromCheckpoint(self):
# Create initial checkpoint.
bias = 7.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
    # logits = bias = 7.
# loss = (logits - label)^2 = (7 - 5)^2 = 4
mock_optimizer = self._mock_optimizer(expected_loss=4.)
baseline_estimator = _baseline_estimator_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_estimator.train(
input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=initial_global_step + num_steps,
expected_bias=[bias])
if __name__ == '__main__':
test.main()
|
apache-2.0
|
Konubinix/lazygal
|
lazygal/generators.py
|
1
|
35055
|
# Lazygal, a lazy static web gallery generator.
# Copyright (C) 2007-2012 Alexandre Rossi <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import glob
import locale
import logging
import gc
import genshi
import sys
import re
from config import LazygalConfig, LazygalWebgalConfig
from config import USER_CONFIG_PATH, LazygalConfigDeprecated
from sourcetree import SOURCEDIR_CONFIGFILE
from pygexiv2 import GExiv2
import make
import pathutils
import sourcetree
import tpl
import newsize
import metadata
import genpage
import genmedia
import genfile
from lazygal import INSTALL_MODE, INSTALL_PREFIX
if INSTALL_MODE == 'source':
DATAPATH = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
elif INSTALL_MODE == 'installed':
DATAPATH = os.path.join(INSTALL_PREFIX, 'share', 'lazygal')
if not os.path.exists(os.path.join(DATAPATH, 'themes')):
print _('Could not find themes dir, check your installation!')
sys.exit(1)
DEST_SHARED_DIRECTORY_NAME = 'shared'
class SubgalSort(make.MakeTask):
"""
This task sorts the medias within a gallery according to the chosen rule.
"""
def __init__(self, webgal_dir):
make.MakeTask.__init__(self)
self.set_dep_only()
self.webgal_dir = webgal_dir
def build(self):
logging.info(_(" SORTING pics and subdirs"))
if self.webgal_dir.subgal_sort_by[0] == 'exif':
subgal_sorter = \
lambda x, y: x.source_dir.compare_latest_exif(y.source_dir)
elif self.webgal_dir.subgal_sort_by[0] == 'mtime':
subgal_sorter = \
lambda x, y: x.source_dir.compare_mtime(y.source_dir)
elif self.webgal_dir.subgal_sort_by[0] == 'dirname'\
or self.webgal_dir.subgal_sort_by[0] == 'filename': # Backward compatibility
subgal_sorter = \
lambda x, y: x.source_dir.compare_filename(y.source_dir)
else:
raise ValueError(_("Unknown sorting criterion '%s'")
% self.webgal_dir.subgal_sort_by[0])
self.webgal_dir.subgals.sort(subgal_sorter,
reverse=self.webgal_dir.subgal_sort_by[1])
if self.webgal_dir.pic_sort_by[0] == 'exif':
sorter = lambda x, y: x.media.compare_to_sort(y.media)
elif self.webgal_dir.pic_sort_by[0] == 'mtime':
sorter = lambda x, y: x.media.compare_mtime(y.media)
elif self.webgal_dir.pic_sort_by[0] == 'filename':
sorter = lambda x, y: x.media.compare_filename(y.media)
else:
raise ValueError(_("Unknown sorting criterion '%s'")
% self.webgal_dir.pic_sort_by[0])
self.webgal_dir.medias.sort(sorter,
reverse=self.webgal_dir.pic_sort_by[1])
# chain medias
previous = None
for media in self.webgal_dir.medias:
if previous:
previous.set_next(media)
media.set_previous(previous)
previous = media
class SubgalBreak(make.MakeTask):
"""
This task breaks galleries into multiple pages.
"""
def __init__(self, webgal_dir):
make.MakeTask.__init__(self)
self.webgal_dir = webgal_dir
self.__last_page_number = -1
def next_page_number(self):
self.__last_page_number += 1
return self.__last_page_number
def how_many_pages(self):
return self.__last_page_number + 1
def build(self):
logging.info(_(" BREAKING web gallery into multiple pages"))
if self.webgal_dir.thumbs_per_page == 0:
self.__fill_no_pagination()
else:
if self.webgal_dir.flatten_below():
self.__fill_loose_pagination()
else:
self.__fill_real_pagination()
def __fill_no_pagination(self):
galleries = []
galleries.append((self.webgal_dir, self.webgal_dir.medias))
if self.webgal_dir.flatten_below():
subgals = []
for dir in self.webgal_dir.get_all_subgals():
galleries.append((dir, dir.medias))
else:
subgals = self.webgal_dir.subgals
self.webgal_dir.add_index_page(subgals, galleries)
def __fill_loose_pagination(self):
"""
Loose pagination not breaking subgals (chosen if subgals are flattened).
"""
subgals = [] # No subgal links as they are flattened
galleries = []
how_many_medias = 0
subgals_it = iter([self.webgal_dir] + self.webgal_dir.get_all_subgals())
try:
while True:
subgal = subgals_it.next()
how_many_medias += subgal.get_media_count()
galleries.append((subgal, subgal.medias))
if how_many_medias > self.webgal_dir.thumbs_per_page:
self.webgal_dir.add_index_page(subgals, galleries)
galleries = []
how_many_medias = 0
except StopIteration:
if len(galleries) > 0:
self.webgal_dir.add_index_page(subgals, galleries)
def __fill_real_pagination(self):
medias_amount = len(self.webgal_dir.medias)
how_many_pages = medias_amount // self.webgal_dir.thumbs_per_page
if medias_amount == 0\
or medias_amount % self.webgal_dir.thumbs_per_page > 0:
how_many_pages = how_many_pages + 1
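        # Illustration (assumed numbers): with thumbs_per_page == 10, 25 medias
        # give 25 // 10 == 2 full pages plus a remainder of 5, hence 3 pages;
        # an empty gallery still gets a single index page.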
for page_number in range(0, how_many_pages):
step = page_number * self.webgal_dir.thumbs_per_page
end_index = step + self.webgal_dir.thumbs_per_page
shown_medias = self.webgal_dir.medias[step:end_index]
galleries = [(self.webgal_dir, shown_medias)]
# subgal links only for first page
if page_number == 0:
subgals = self.webgal_dir.subgals
else:
subgals = []
self.webgal_dir.add_index_page(subgals, galleries)
class WebalbumMediaTask(make.GroupTask):
def __init__(self, webgal, media):
super(WebalbumMediaTask, self).__init__()
self.webgal = webgal
self.media = media
self.previous = None
self.next = None
self.original = None
self.resized = {}
self.browse_pages = {}
for size_name in self.webgal.browse_sizes:
if self.webgal.newsizers[size_name] == 'original':
self.resized[size_name] = self.get_original()
else:
self.resized[size_name] = self.get_resized(size_name)
self.add_dependency(self.resized[size_name])
if self.webgal.original and not self.webgal.orig_base:
self.add_dependency(self.get_original())
if self.webgal.album.theme.kind == 'static':
self.browse_pages[size_name] = self.get_browse_page(size_name)
if self.webgal.album.force_gen_pages:
self.browse_pages[size_name].stamp_delete()
def set_next(self, media):
self.next = media
if media:
for bpage in self.browse_pages.values():
if media.thumb: bpage.add_dependency(media.thumb)
def set_previous(self, media):
self.previous = media
if media:
for bpage in self.browse_pages.values():
if media.thumb: bpage.add_dependency(media.thumb)
def get_original_or_symlink(self):
if not self.webgal.orig_symlink:
return genfile.CopyMediaOriginal(self.webgal, self.media)
else:
return genfile.SymlinkMediaOriginal(self.webgal, self.media)
def get_original(self):
if not self.original:
self.original = self.get_original_or_symlink()
return self.original
def get_browse_page(self, size_name):
return genpage.WebalbumBrowsePage(self.webgal, size_name, self)
def make(self):
super(WebalbumMediaTask, self).make()
self.webgal.media_done()
class WebalbumImageTask(WebalbumMediaTask):
"""
This task builds all items related to one picture.
"""
def __init__(self, webgal, image):
super(WebalbumImageTask, self).__init__(webgal, image)
self.thumb = genmedia.ImageOtherSize(self.webgal, self.media,
genmedia.THUMB_SIZE_NAME)
self.add_dependency(self.thumb)
def get_resized(self, size_name):
if self.webgal.newsizers[size_name] == 'original':
return self.get_original_or_symlink()
else:
sized = genmedia.ImageOtherSize(self.webgal, self.media, size_name)
self.media.get_size() # probe size to check if media is broken
if not self.media.broken\
and sized.get_size() == sized.source_media.get_size():
# Do not process if size is the same
return self.get_original()
else:
return sized
class WebalbumVideoTask(WebalbumMediaTask):
"""
This task builds all items related to one video.
"""
def __init__(self, webgal, video):
self.webvideo = None
super(WebalbumVideoTask, self).__init__(webgal, video)
self.thumb = genmedia.VideoThumb(self.webgal, self.media,
genmedia.THUMB_SIZE_NAME)
self.add_dependency(self.webvideo)
def get_resized(self, size_name):
if not self.webvideo:
self.webvideo = genmedia.WebVideo(self.webgal, self.media,
self.webgal.default_size_name)
return self.webvideo
class WebalbumDir(make.FileMakeObject):
"""
This is a built web gallery with its files, thumbs and reduced pics.
"""
def __init__(self, dir, subgals, album, album_dest_dir, progress=None):
self.source_dir = dir
self.path = os.path.join(album_dest_dir, self.source_dir.strip_root())
if self.path.endswith(os.sep): self.path = os.path.dirname(self.path)
super(WebalbumDir, self).__init__(self.path)
self.add_dependency(self.source_dir)
self.subgals = subgals
self.album = album
self.feed = None
self.flattening_dir = None
self.config = LazygalWebgalConfig(self.album.config)
self.__configure()
# Create the directory if it does not exist
if not os.path.isdir(self.path) and (self.source_dir.get_media_count() > 0):
logging.info(_(" MKDIR %%WEBALBUMROOT%%/%s")
% self.source_dir.strip_root())
logging.debug("(%s)" % self.path)
os.makedirs(self.path, mode=0755)
            # Directory did not exist, mark it as such
self.stamp_delete()
tagfilters = self.config.getlist('webgal', 'filter-by-tag')
self.medias = []
self.sort_task = SubgalSort(self)
self.sort_task.add_dependency(self.source_dir)
for media in self.source_dir.medias:
self.sort_task.add_dependency(media)
if len(tagfilters) > 0 and media.info() is not None:
# tag-filtering is requested
res = True
for tagf in tagfilters:
                    # Concatenate the media's tags into a single
                    # space-separated string and match each filter tag as a
                    # whole word (\b boundaries), so that partial keyword
                    # matches are not accepted.
regex = re.compile(r"\b" + tagf + r"\b")
kwlist = ' '.join(media.info().get_keywords())
if re.search(regex, kwlist) is None:
res = False
break
if res is False:
continue
if media.type == 'image':
media_task = WebalbumImageTask(self, media)
elif media.type == 'video':
media_task = WebalbumVideoTask(self, media)
else:
raise NotImplementedError("Unknown media type '%s'"
% media.type)
self.medias.append(media_task)
self.add_dependency(media_task)
if self.config.getboolean('webgal', 'dirzip')\
and self.get_media_count() > 1:
self.dirzip = genfile.WebalbumArchive(self)
self.add_dependency(self.dirzip)
else:
self.dirzip = None
self.index_pages = []
if not self.should_be_flattened():
self.break_task = SubgalBreak(self)
if self.thumbs_per_page > 0:
                # FIXME: If pagination is 'on', galleries need to be sorted
                # before being broken into multiple pages, which considerably
                # slows down checking whether a directory needs to be built.
self.break_task.add_dependency(self.sort_task)
# This task is special because it populates dependencies. This is
# why it needs to be built before a build check.
self.break_task.make()
self.webgal_pic = genmedia.WebalbumPicture(self)
self.add_dependency(self.webgal_pic)
else:
self.break_task = None
self.progress = progress
def __parse_browse_sizes(self, sizes_string):
for single_def in sizes_string.split(','):
name, string_size = single_def.split('=')
name = name.decode(locale.getpreferredencoding())
if name == '':
raise ValueError(_("Sizes is a comma-separated list of size names and specs:\n\t e.g. \"small=640x480,medium=1024x768\"."))
if name == genmedia.THUMB_SIZE_NAME:
raise ValueError(_("Size name '%s' is reserved for internal processing.") % genmedia.THUMB_SIZE_NAME)
self.__parse_size(name, string_size)
self.browse_sizes.append(name)
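        # Illustration (assumed config value): image-size = "small=640x480,medium=1024x768"
        # yields browse_sizes == ['small', 'medium']; a spec of "0x0" would map
        # that size name to the untouched original (see __parse_size below).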
def __parse_size(self, size_name, size_string):
if size_string == '0x0':
self.newsizers[size_name] = 'original'
else:
try:
self.newsizers[size_name] = newsize.get_newsizer(size_string)
except newsize.NewsizeStringParseError:
raise ValueError(_("'%s' for size '%s' does not describe a known size syntax.") % (size_string.decode(locale.getpreferredencoding()), size_name, ))
def __parse_sort(self, sort_string):
try:
sort_method, reverse = sort_string.split(':')
except ValueError:
sort_method = sort_string
reverse = False
if reverse == 'reverse':
return sort_method, True
else:
return sort_method, False
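        # Illustration (assumed config values): 'mtime:reverse' parses to
        # ('mtime', True), while a bare 'filename' parses to ('filename', False).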
def __load_tpl_vars(self):
# Load tpl vars from config
tpl_vars = {}
if self.config.has_section('template-vars'):
tpl_vars = {}
for option in self.config.options('template-vars'):
try:
value = self.config.getboolean('template-vars', option)
tpl_vars[option] = value
except ValueError:
value = self.config.get('template-vars', option)
value = value.decode(locale.getpreferredencoding())
tpl_vars[option] = genshi.core.Markup(value)
return tpl_vars
def __configure(self):
config_dirs = self.source_dir.parent_paths()[:-1] # strip root dir
config_dirs.reverse() # from root to deepest
config_files = map(lambda d: os.path.join(d, SOURCEDIR_CONFIGFILE),
config_dirs)
logging.debug(_(" Trying loading gallery configs: %s")
% ', '.join(map(self.source_dir.strip_root,
config_files)))
self.config.read(config_files)
self.browse_sizes = []
self.newsizers = {}
self.__parse_browse_sizes(self.config.get('webgal', 'image-size'))
self.__parse_size(genmedia.THUMB_SIZE_NAME,
self.config.get('webgal', 'thumbnail-size'))
self.default_size_name = self.browse_sizes[0]
self.tpl_vars = self.__load_tpl_vars()
styles = self.album.theme.get_avail_styles(
self.config.get('webgal', 'default-style'))
self.tpl_vars.update({'styles': styles})
self.set_original(self.config.getboolean('webgal', 'original'),
self.config.getstr('webgal', 'original-baseurl'),
self.config.getboolean('webgal', 'original-symlink'))
self.thumbs_per_page = self.config.getint('webgal', 'thumbs-per-page')
self.quality = self.config.getint('webgal', 'jpeg-quality')
self.save_options = {}
if self.config.getboolean('webgal', 'jpeg-optimize'):
self.save_options['optimize'] = True
if self.config.getboolean('webgal', 'jpeg-progressive'):
self.save_options['progressive'] = True
self.pic_sort_by = self.__parse_sort(self.config.get('webgal', 'sort-medias'))
self.subgal_sort_by = self.__parse_sort(self.config.get('webgal', 'sort-subgals'))
self.filter_by_tag = self.config.get('webgal', 'filter-by-tag')
self.webalbumpic_bg = self.config.get('webgal', 'webalbumpic-bg')
self.webalbumpic_type = self.config.get('webgal', 'webalbumpic-type')
try:
self.webalbumpic_size = map(int, self.config.get('webgal', 'webalbumpic-size').split('x'))
if len(self.webalbumpic_size) != 2:
raise ValueError
except ValueError:
logging.error(_('Bad syntax for webalbumpic-size.'))
sys.exit(1)
self.keep_gps = self.config.getboolean('webgal', 'keep-gps')
def set_original(self, original=False, orig_base=None, orig_symlink=False):
self.original = original or orig_symlink
self.orig_symlink = orig_symlink
if self.original and orig_base and not orig_symlink:
self.orig_base = orig_base
else:
self.orig_base = None
def get_webalbumpic_filename(self):
if self.webalbumpic_bg == 'transparent':
ext = '.png' # JPEG does not have an alpha channel
else:
ext = '.jpg'
return genmedia.WebalbumPicture.BASEFILENAME + ext
def _add_size_qualifier(self, path, size_name, force_extension=None):
filename, extension = os.path.splitext(path)
if force_extension is not None:
extension = force_extension
if size_name == self.default_size_name and extension == '.html':
# Do not append default size name to HTML page filename
return path
elif size_name in self.browse_sizes\
and self.newsizers[size_name] == 'original'\
and extension != '.html':
# Do not append size_name to unresized images.
return path
else:
return "%s_%s%s" % (filename, size_name, extension)
def add_index_page(self, subgals, galleries):
page_number = self.break_task.next_page_number()
pages = []
for size_name in self.browse_sizes:
page = genpage.WebalbumIndexPage(self, size_name, page_number,
subgals, galleries)
if self.album.force_gen_pages:
page.stamp_delete()
self.add_dependency(page)
pages.append(page)
self.index_pages.append(pages)
def register_output(self, output):
# We only care about output in the current directory
if os.path.dirname(output) == self.path:
super(WebalbumDir, self).register_output(output)
def register_feed(self, feed):
self.feed = feed
def get_subgal_count(self):
if self.flatten_below():
return 0
else:
            return len(self.source_dir.subdirs)
def get_all_subgals(self):
all_subgals = list(self.subgals) # We want a copy here.
for subgal in self.subgals:
all_subgals.extend(subgal.get_all_subgals())
return all_subgals
def get_media_count(self, media_type=None):
if media_type is None:
return len(self.medias)
else:
typed_media_count = 0
for mediatask in self.medias:
if mediatask.media.type == media_type:
typed_media_count += 1
return typed_media_count
def get_all_medias_tasks(self):
all_medias = list(self.medias) # We want a copy here.
for subgal in self.subgals:
all_medias.extend(subgal.get_all_medias_tasks())
return all_medias
def should_be_flattened(self, path=None):
if path is None: path = self.source_dir.path
return self.album.dir_flattening_depth is not False\
and self.source_dir.get_album_level(path) > self.album.dir_flattening_depth
def flatten_below(self):
if self.album.dir_flattening_depth is False:
return False
elif len(self.source_dir.subdirs) > 0:
# As all subdirs are at the same level, if one should be flattened,
# all should.
return self.subgals[0].should_be_flattened()
else:
return False
def rel_path_to_src(self, target_srcdir_path):
"""
Returns the relative path to go from this directory to
target_srcdir_path.
"""
return self.source_dir.rel_path(self.source_dir.path,
target_srcdir_path)
def rel_path(self, path):
"""
Returns the relative path to go from this directory to the path
supplied as argument.
"""
return os.path.relpath(path, self.path)
def flattening_srcpath(self, srcdir_path):
"""
        Returns the source path in which srcdir_path should be flattened, that is
the path of the gallery index that will point to srcdir_path's
pictures.
"""
if self.should_be_flattened(srcdir_path):
cur_path = srcdir_path
while self.should_be_flattened(cur_path):
cur_path, dummy = os.path.split(cur_path)
return cur_path
else:
return ''
def list_foreign_files(self):
foreign_files = []
# Check dest for junk files
extra_files = []
if self.source_dir.is_album_root():
extra_files.append(os.path.join(self.path,
DEST_SHARED_DIRECTORY_NAME))
dirnames = [d.name for d in self.source_dir.subdirs]
expected_dirs = map(lambda dn: os.path.join(self.path, dn), dirnames)
for dest_file in os.listdir(self.path):
dest_file = os.path.join(self.path, dest_file)
if not isinstance(dest_file, unicode):
# FIXME: No clue why this happens, but it happens!
dest_file = dest_file.decode(sys.getfilesystemencoding())
if dest_file not in self.output_items and\
dest_file not in expected_dirs and\
dest_file not in extra_files:
foreign_files.append(dest_file)
return foreign_files
def build(self):
for dest_file in self.list_foreign_files():
self.album.cleanup(dest_file)
def make(self, force=False):
needed_build = self.needs_build()
super(WebalbumDir, self).make(force or needed_build)
# Although we should have modified the directory contents and thus its
# mtime, it is possible that the directory mtime has not been updated
# if we regenerated without adding/removing pictures (to take into
# account a rotation for example). This is why we force directory mtime
# update here if something has been built.
if needed_build: os.utime(self.path, None)
def media_done(self):
if self.progress is not None:
self.progress.media_done()
class SharedFiles(make.FileMakeObject):
def __init__(self, album, dest_dir, tpl_vars):
self.path = os.path.join(dest_dir, DEST_SHARED_DIRECTORY_NAME)
self.album = album
# Create the shared files directory if it does not exist
if not os.path.isdir(self.path):
logging.info(_("MKDIR %SHAREDDIR%"))
logging.debug("(%s)" % self.path)
os.makedirs(self.path, mode=0755)
super(SharedFiles, self).__init__(self.path)
self.expected_shared_files = []
for shared_file in glob.glob(
os.path.join(self.album.theme.tpl_dir,
tpl.THEME_SHARED_FILE_PREFIX + '*')):
shared_file_name = os.path.basename(shared_file).\
replace(tpl.THEME_SHARED_FILE_PREFIX, '')
shared_file_dest = os.path.join(self.path,
shared_file_name)
if self.album.theme.tpl_loader.is_known_template_type(shared_file):
sf = genpage.SharedFileTemplate(album, shared_file,
shared_file_dest,
tpl_vars)
if self.album.force_gen_pages:
sf.stamp_delete()
self.expected_shared_files.append(sf.path)
else:
sf = genfile.SharedFileCopy(shared_file, shared_file_dest)
self.expected_shared_files.append(shared_file_dest)
self.add_dependency(sf)
def build(self):
        # Clean up theme files which are no longer part of the theme.
for present_file in os.listdir(self.path):
file_path = os.path.join(self.path, present_file)
if file_path not in self.expected_shared_files:
self.album.cleanup(file_path)
class AlbumGenProgress(object):
def __init__(self, dirs_total, medias_total):
self._dirs_total = dirs_total
self._dirs_done = 0
self._medias_total = medias_total
self._medias_done = 0
def dir_done(self):
self._dirs_done = self._dirs_done + 1
self.updated()
def media_done(self, how_many=1):
self._medias_done = self._medias_done + how_many
self.updated()
def __unicode__(self):
return _("Progress: dir %d/%d (%d%%), media %d/%d (%d%%)")\
% (self._dirs_done, self._dirs_total,
100 * self._dirs_done // self._dirs_total,
self._medias_done, self._medias_total,
100 * self._medias_done // self._medias_total,
)
def updated(self):
pass
class Album(object):
def __init__(self, source_dir, config=None):
self.source_dir = os.path.abspath(source_dir)
self.config = LazygalConfig()
logging.info(_("Trying loading user config %s") % USER_CONFIG_PATH)
self.config.read(USER_CONFIG_PATH)
sourcedir_configfile = os.path.join(source_dir, SOURCEDIR_CONFIGFILE)
if os.path.isfile(sourcedir_configfile):
logging.info(_("Loading root config %s") % sourcedir_configfile)
try:
self.config.read(sourcedir_configfile)
except LazygalConfigDeprecated:
logging.error(_("'%s' uses a deprecated syntax: please refer to lazygal.conf(5) manual page.") % sourcedir_configfile)
sys.exit(1)
if config is not None: # Supplied config
self.config.load(config)
if self.config.getboolean('runtime', 'quiet'):
logging.getLogger().setLevel(logging.ERROR)
if self.config.getboolean('runtime', 'debug'):
logging.getLogger().setLevel(logging.DEBUG)
GExiv2.log_set_level(GExiv2.LogLevel.INFO)
self.clean_dest = self.config.getboolean('global', 'clean-destination')
self.force_gen_pages = self.config.getboolean('global', 'force-gen-pages')
self.set_theme(self.config.get('global', 'theme'))
self.dir_flattening_depth = self.config.getint('global', 'dir-flattening-depth')
self.__statistics = None
def set_theme(self, theme=tpl.DEFAULT_THEME):
self.theme = tpl.Theme(os.path.join(DATAPATH, 'themes'), theme)
def _str_humanize(self, text):
dash_replaced = text.replace('_', ' ')
return dash_replaced
def is_in_sourcetree(self, path):
return pathutils.is_subdir_of(self.source_dir, path)
def cleanup(self, file_path):
text = ''
if self.clean_dest and not os.path.isdir(file_path):
os.unlink(file_path)
text = ''
else:
text = _('you should ')
logging.info(_(' %sRM %s') % (text, file_path))
def generate_default_metadata(self):
"""
        Generate default metadata files if none exist.
"""
logging.debug(_("Generating metadata in %s") % self.source_dir)
for root, dirnames, filenames in pathutils.walk(self.source_dir):
filenames.sort() # This is required for the ignored files
# checks to be reliable.
source_dir = sourcetree.Directory(root, [], filenames, self)
logging.info(_("[Entering %%ALBUMROOT%%/%s]") % source_dir.strip_root())
logging.debug("(%s)" % source_dir.path)
metadata.DefaultMetadata(source_dir, self).make()
def stats(self):
if self.__statistics is None:
self.__statistics = {
'total' : 0,
'bydir' : {}
}
for root, dirnames, filenames in pathutils.walk(self.source_dir):
dir_medias = len([f for f in filenames\
if sourcetree.MediaHandler.is_known_media(f)])
self.__statistics['total'] = self.__statistics['total']\
+ dir_medias
self.__statistics['bydir'][root] = dir_medias
return self.__statistics
def generate(self, dest_dir=None, progress=None):
if dest_dir is None:
dest_dir = self.config.getstr('global', 'output-directory')
else:
dest_dir = dest_dir.decode(sys.getfilesystemencoding())
sane_dest_dir = os.path.abspath(os.path.expanduser(dest_dir))
pub_url = self.config.getstr('global', 'puburl')
check_all_dirs = self.config.getboolean('runtime', 'check-all-dirs')
if self.is_in_sourcetree(sane_dest_dir):
raise ValueError(_("Fatal error, web gallery directory is within source tree."))
logging.debug(_("Generating to %s") % sane_dest_dir)
if pub_url:
feed = genpage.WebalbumFeed(self, sane_dest_dir, pub_url)
else:
feed = None
dir_heap = {}
for root, dirnames, filenames in pathutils.walk(self.source_dir):
if root in dir_heap:
subdirs, subgals = dir_heap[root]
del dir_heap[root] # No need to keep it there
else:
subdirs = []
subgals = []
checked_dir = sourcetree.File(root, self)
if checked_dir.should_be_skipped():
logging.debug(_("(%s) has been skipped") % checked_dir.path)
continue
if checked_dir.path == os.path.join(sane_dest_dir,
DEST_SHARED_DIRECTORY_NAME):
logging.error(_("(%s) has been skipped because its name collides with the shared material directory name") % checked_dir.path)
continue
logging.info(_("[Entering %%ALBUMROOT%%/%s]") % checked_dir.strip_root())
logging.debug("(%s)" % checked_dir.path)
source_dir = sourcetree.Directory(root, subdirs, filenames, self)
destgal = WebalbumDir(source_dir, subgals, self, sane_dest_dir,
progress)
if source_dir.is_album_root():
# Use root config tpl vars for shared files
tpl_vars = destgal.tpl_vars
if source_dir.get_all_medias_count() < 1:
logging.debug(_("(%s) and childs have no known medias, skipped")
% source_dir.path)
continue
if not source_dir.is_album_root():
container_dirname = os.path.dirname(root)
if container_dirname not in dir_heap:
dir_heap[container_dirname] = ([], [])
container_subdirs, container_subgals = dir_heap[container_dirname]
container_subdirs.append(source_dir)
container_subgals.append(destgal)
if feed and source_dir.is_album_root():
feed.set_title(source_dir.human_name)
md = destgal.source_dir.metadata.get()
if 'album_description' in md.keys():
feed.set_description(md['album_description'])
destgal.register_output(feed.path)
if feed:
feed.push_dir(destgal)
destgal.register_feed(feed)
if check_all_dirs:
destgal.make()
elif destgal.needs_build():
destgal.make(force=True) # avoid another needs_build() call in make()
else:
if progress is not None:
progress.media_done(len(destgal.medias))
logging.debug(_(" SKIPPED because of mtime, touch source or use --check-all-dirs to override."))
            # Force some memory cleanups, this is useful for big albums.
del destgal
gc.collect()
if progress is not None:
progress.dir_done()
logging.info(_("[Leaving %%ALBUMROOT%%/%s]") % source_dir.strip_root())
if feed:
feed.make()
        # Force a check for unexpected files
SharedFiles(self, sane_dest_dir, tpl_vars).make(True)
# vim: ts=4 sw=4 expandtab
|
gpl-2.0
|
linked67/p2pool-cryptcoin
|
p2pool/util/graph.py
|
226
|
7325
|
from __future__ import absolute_import
from __future__ import division
import math
from p2pool.util import math as math2
class DataViewDescription(object):
def __init__(self, bin_count, total_width):
self.bin_count = bin_count
self.bin_width = total_width/bin_count
def _shift(x, shift, pad_item):
left_pad = math2.clip(shift, (0, len(x)))
right_pad = math2.clip(-shift, (0, len(x)))
return [pad_item]*left_pad + x[right_pad:-left_pad if left_pad else None] + [pad_item]*right_pad
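# Illustration (assumed values): _shift([1, 2, 3, 4], 2, 0) -> [0, 0, 1, 2]
# (two fresh pad items pushed in front), while a negative shift pads the tail:
# _shift([1, 2, 3, 4], -1, 0) -> [2, 3, 4, 0].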
combine_bins = math2.add_dicts_ext(lambda (a1, b1), (a2, b2): (a1+a2, b1+b2), (0, 0))
nothing = object()
def keep_largest(n, squash_key=nothing, key=lambda x: x, add_func=lambda a, b: a+b):
def _(d):
items = sorted(d.iteritems(), key=lambda (k, v): (k != squash_key, key(v)), reverse=True)
while len(items) > n:
k, v = items.pop()
if squash_key is not nothing:
items[-1] = squash_key, add_func(items[-1][1], v)
return dict(items)
return _
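# Illustration (assumed dict, default key/add_func):
# keep_largest(2)({'a': 5, 'b': 1, 'c': 3}) -> {'a': 5, 'c': 3}; with a squash
# key the dropped values are folded into it instead:
# keep_largest(2, 'other')({'a': 5, 'b': 1, 'c': 3}) -> {'a': 5, 'other': 4}.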
def _shift_bins_so_t_is_not_past_end(bins, last_bin_end, bin_width, t):
# returns new_bins, new_last_bin_end
shift = max(0, int(math.ceil((t - last_bin_end)/bin_width)))
return _shift(bins, shift, {}), last_bin_end + shift*bin_width
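# Illustration (assumed values): with bin_width 10, last_bin_end 100 and t 125,
# shift == ceil(25/10) == 3, so three fresh empty bins are pushed in front and
# the new last_bin_end becomes 130.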
class DataView(object):
def __init__(self, desc, ds_desc, last_bin_end, bins):
assert len(bins) == desc.bin_count
self.desc = desc
self.ds_desc = ds_desc
self.last_bin_end = last_bin_end
self.bins = bins
def _add_datum(self, t, value):
if not self.ds_desc.multivalues:
value = {'null': value}
elif self.ds_desc.multivalue_undefined_means_0 and 'null' not in value:
value = dict(value, null=0) # use null to hold sample counter
self.bins, self.last_bin_end = _shift_bins_so_t_is_not_past_end(self.bins, self.last_bin_end, self.desc.bin_width, t)
bin = int(math.floor((self.last_bin_end - t)/self.desc.bin_width))
assert bin >= 0
if bin < self.desc.bin_count:
self.bins[bin] = self.ds_desc.keep_largest_func(combine_bins(self.bins[bin], dict((k, (v, 1)) for k, v in value.iteritems())))
def get_data(self, t):
bins, last_bin_end = _shift_bins_so_t_is_not_past_end(self.bins, self.last_bin_end, self.desc.bin_width, t)
assert last_bin_end - self.desc.bin_width <= t <= last_bin_end
def _((i, bin)):
left, right = last_bin_end - self.desc.bin_width*(i + 1), min(t, last_bin_end - self.desc.bin_width*i)
center, width = (left+right)/2, right-left
if self.ds_desc.is_gauge and self.ds_desc.multivalue_undefined_means_0:
real_count = max([0] + [count for total, count in bin.itervalues()])
if real_count == 0:
val = None
else:
val = dict((k, total/real_count) for k, (total, count) in bin.iteritems())
default = 0
elif self.ds_desc.is_gauge and not self.ds_desc.multivalue_undefined_means_0:
val = dict((k, total/count) for k, (total, count) in bin.iteritems())
default = None
else:
val = dict((k, total/width) for k, (total, count) in bin.iteritems())
default = 0
if not self.ds_desc.multivalues:
val = None if val is None else val.get('null', default)
return center, val, width, default
return map(_, enumerate(bins))
class DataStreamDescription(object):
def __init__(self, dataview_descriptions, is_gauge=True, multivalues=False, multivalues_keep=20, multivalues_squash_key=None, multivalue_undefined_means_0=False, default_func=None):
self.dataview_descriptions = dataview_descriptions
self.is_gauge = is_gauge
self.multivalues = multivalues
self.keep_largest_func = keep_largest(multivalues_keep, multivalues_squash_key, key=lambda (t, c): t/c if self.is_gauge else t, add_func=lambda (a1, b1), (a2, b2): (a1+a2, b1+b2))
self.multivalue_undefined_means_0 = multivalue_undefined_means_0
self.default_func = default_func
class DataStream(object):
def __init__(self, desc, dataviews):
self.desc = desc
self.dataviews = dataviews
def add_datum(self, t, value=1):
for dv_name, dv in self.dataviews.iteritems():
dv._add_datum(t, value)
class HistoryDatabase(object):
@classmethod
def from_obj(cls, datastream_descriptions, obj={}):
def convert_bin(bin):
if isinstance(bin, dict):
return bin
total, count = bin
if not isinstance(total, dict):
total = {'null': total}
return dict((k, (v, count)) for k, v in total.iteritems()) if count else {}
def get_dataview(ds_name, ds_desc, dv_name, dv_desc):
if ds_name in obj:
ds_data = obj[ds_name]
if dv_name in ds_data:
dv_data = ds_data[dv_name]
if dv_data['bin_width'] == dv_desc.bin_width and len(dv_data['bins']) == dv_desc.bin_count:
return DataView(dv_desc, ds_desc, dv_data['last_bin_end'], map(convert_bin, dv_data['bins']))
elif ds_desc.default_func is None:
return DataView(dv_desc, ds_desc, 0, dv_desc.bin_count*[{}])
else:
return ds_desc.default_func(ds_name, ds_desc, dv_name, dv_desc, obj)
return cls(dict(
(ds_name, DataStream(ds_desc, dict(
(dv_name, get_dataview(ds_name, ds_desc, dv_name, dv_desc))
for dv_name, dv_desc in ds_desc.dataview_descriptions.iteritems()
)))
for ds_name, ds_desc in datastream_descriptions.iteritems()
))
def __init__(self, datastreams):
self.datastreams = datastreams
def to_obj(self):
return dict((ds_name, dict((dv_name, dict(last_bin_end=dv.last_bin_end, bin_width=dv.desc.bin_width, bins=dv.bins))
for dv_name, dv in ds.dataviews.iteritems())) for ds_name, ds in self.datastreams.iteritems())
def make_multivalue_migrator(multivalue_keys, post_func=lambda bins: bins):
def _(ds_name, ds_desc, dv_name, dv_desc, obj):
if not obj:
last_bin_end = 0
bins = dv_desc.bin_count*[{}]
else:
inputs = dict((k, obj.get(v, {dv_name: dict(bins=[{}]*dv_desc.bin_count, last_bin_end=0)})[dv_name]) for k, v in multivalue_keys.iteritems())
last_bin_end = max(inp['last_bin_end'] for inp in inputs.itervalues()) if inputs else 0
assert all(len(inp['bins']) == dv_desc.bin_count for inp in inputs.itervalues())
inputs = dict((k, dict(zip(['bins', 'last_bin_end'], _shift_bins_so_t_is_not_past_end(v['bins'], v['last_bin_end'], dv_desc.bin_width, last_bin_end)))) for k, v in inputs.iteritems())
assert len(set(inp['last_bin_end'] for inp in inputs.itervalues())) <= 1
bins = post_func([dict((k, v['bins'][i]['null']) for k, v in inputs.iteritems() if 'null' in v['bins'][i]) for i in xrange(dv_desc.bin_count)])
return DataView(dv_desc, ds_desc, last_bin_end, bins)
return _
|
gpl-3.0
|
shawnadelic/shuup
|
shuup/admin/modules/products/views/list.py
|
1
|
2246
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from shuup.admin.utils.picotable import ChoicesFilter, Column, TextFilter
from shuup.admin.utils.views import PicotableListView
from shuup.core.models import Category, Product, ProductMode
class ProductListView(PicotableListView):
model = Product
default_columns = [
Column("sku", _(u"SKU"), display="sku", filter_config=TextFilter(placeholder=_("Filter by SKU..."))),
Column("name", _(u"Name"), sort_field="translations__name", display="name", filter_config=TextFilter(
filter_field="translations__name",
placeholder=_("Filter by name...")
)),
Column("barcode", _(u"Barcode"), display="barcode", filter_config=TextFilter(_("Filter by barcode..."))),
Column("type", _(u"Type")),
Column("mode", _(u"Mode"), filter_config=ChoicesFilter(ProductMode.choices)),
Column("category", _(u"Primary Category"), filter_config=ChoicesFilter(Category.objects.all(), "category")),
]
def get_queryset(self):
filter = self.get_filter()
shop_id = filter.get("shop")
qs = Product.objects.all_except_deleted()
q = Q()
for mode in filter.get("modes", []):
q |= Q(mode=mode)
manufacturer_ids = filter.get("manufacturers")
if manufacturer_ids:
q |= Q(manufacturer_id__in=manufacturer_ids)
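        # Illustration (hypothetical filter values): modes ['normal', 'variation_child']
        # and manufacturers [1, 2] build q as
        # Q(mode='normal') | Q(mode='variation_child') | Q(manufacturer_id__in=[1, 2]).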
qs = qs.filter(q)
if shop_id:
qs = qs.filter(shop_products__shop_id=int(shop_id))
return qs
def get_object_abstract(self, instance, item):
return [
{"text": "%s" % instance, "class": "header"},
{"title": _(u"Barcode"), "text": item.get("barcode")},
{"title": _(u"SKU"), "text": item.get("sku")},
{"title": _(u"Type"), "text": item.get("type")},
{"title": _(u"Primary Category"), "text": item.get("category")}
]
|
agpl-3.0
|
alobbs/ansible
|
samples/multi_queues.py
|
38
|
4867
|
#!/usr/bin/env python
import sys
import time
import Queue
import traceback
import multiprocessing
from ansible.inventory import Inventory
from ansible.inventory.host import Host
from ansible.playbook.play import Play
from ansible.playbook.task import Task
from ansible.executor.connection_info import ConnectionInformation
from ansible.executor.task_executor import TaskExecutor
from ansible.executor.task_result import TaskResult
from ansible.parsing import DataLoader
from ansible.vars import VariableManager
from ansible.utils.debug import debug
NUM_WORKERS = 20
NUM_HOSTS = 1778
NUM_TASKS = 1
def results(final_q, workers):
cur_worker = 0
def _read_worker_result(cur_worker):
result = None
starting_point = cur_worker
while True:
(worker_prc, main_q, res_q) = workers[cur_worker]
cur_worker += 1
if cur_worker >= len(workers):
cur_worker = 0
try:
if not res_q.empty():
debug("worker %d has data to read" % cur_worker)
result = res_q.get()
debug("got a result from worker %d: %s" % (cur_worker, result))
break
except:
pass
if cur_worker == starting_point:
break
return (result, cur_worker)
while True:
result = None
try:
(result, cur_worker) = _read_worker_result(cur_worker)
if result is None:
time.sleep(0.01)
continue
final_q.put(result, block=False)
except (IOError, EOFError, KeyboardInterrupt) as e:
debug("got a breaking error: %s" % e)
break
except Exception as e:
debug("EXCEPTION DURING RESULTS PROCESSING: %s" % e)
traceback.print_exc()
break
def worker(main_q, res_q, loader):
while True:
task = None
try:
if not main_q.empty():
(host, task, task_vars, conn_info) = main_q.get(block=False)
executor_result = TaskExecutor(host, task, task_vars, conn_info, loader).run()
debug("executor result: %s" % executor_result)
task_result = TaskResult(host, task, executor_result)
res_q.put(task_result)
else:
time.sleep(0.01)
except Queue.Empty:
pass
except (IOError, EOFError, KeyboardInterrupt) as e:
debug("got a breaking error: %s" % e)
break
except Exception as e:
debug("EXCEPTION DURING WORKER PROCESSING: %s" % e)
traceback.print_exc()
break
loader = DataLoader()
workers = []
for i in range(NUM_WORKERS):
main_q = multiprocessing.Queue()
res_q = multiprocessing.Queue()
worker_p = multiprocessing.Process(target=worker, args=(main_q, res_q, loader))
worker_p.start()
workers.append((worker_p, main_q, res_q))
res_q = multiprocessing.Queue()
res_p = multiprocessing.Process(target=results, args=(res_q, workers))
res_p.start()
def send_data(obj):
global cur_worker
global workers
global pending_results
(w_proc, main_q, wrkr_q) = workers[cur_worker]
cur_worker += 1
if cur_worker >= len(workers):
cur_worker = 0
pending_results += 1
main_q.put(obj, block=False)
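# Illustration: with NUM_WORKERS == 20, successive send_data() calls hand work
# to workers 0, 1, ..., 19 and then wrap back to 0, while pending_results
# counts how many queued tasks still await a result.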
def _process_pending_results():
global res_q
global pending_results
while not res_q.empty():
try:
result = res_q.get(block=False)
debug("got final result: %s" % (result,))
pending_results -= 1
except Queue.Empty:
pass
def _wait_on_pending_results():
global pending_results
while pending_results > 0:
debug("waiting for pending results (%d left)" % pending_results)
_process_pending_results()
time.sleep(0.01)
debug("starting")
cur_worker = 0
pending_results = 0
var_manager = VariableManager()
debug("loading inventory")
inventory = Inventory(host_list='/tmp/med_inventory', loader=loader, variable_manager=var_manager)
hosts = inventory.get_hosts()[:]
debug("done loading inventory")
ci = ConnectionInformation()
ci.connection = 'local'
for i in range(NUM_TASKS):
#for j in range(NUM_HOSTS):
for h in hosts:
debug("queuing %s %d" % (h, i))
#h = Host(name="host%06d" % j)
t = Task().load(dict(name="task %d" % (i,), debug="msg='hello from %s, %d'" % (h,i)))
#t = Task().load(dict(name="task %d" % (i,), ping=""))
#task_vars = var_manager.get_vars(loader=loader, host=h, task=t)
task_vars = dict()
new_t = t.copy()
new_t.post_validate(task_vars)
send_data((h, t, task_vars, ci))
debug("done queuing %s %d" % (h, i))
_process_pending_results()
debug("waiting for the results to drain...")
_wait_on_pending_results()
res_q.close()
res_p.terminate()
for (w_p, main_q, wrkr_q) in workers:
main_q.close()
wrkr_q.close()
w_p.terminate()
debug("done")
|
gpl-3.0
|
manahl/pytest-plugins
|
pytest-pyramid-server/setup.py
|
1
|
1728
|
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from setuptools import setup
from common_setup import common_setup
classifiers = [
'License :: OSI Approved :: MIT License',
'Development Status :: 5 - Production/Stable',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Testing',
'Topic :: Utilities',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'Framework :: Pyramid',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
install_requires = ['pytest-server-fixtures',
'pytest',
'pyramid',
'waitress',
'six',
]
tests_require = [
'pyramid-debugtoolbar',
]
entry_points = {
'pytest11': [
'pyramid_server = pytest_pyramid_server',
],
'paste.app_factory': [
'pyramid_server_test = pyramid_server_test:main',
],
}
if __name__ == '__main__':
kwargs = common_setup('pytest_pyramid_server')
kwargs.update(dict(
name='pytest-pyramid-server',
description='Pyramid server fixture for py.test',
author='Edward Easton',
author_email='[email protected]',
classifiers=classifiers,
install_requires=install_requires,
tests_require=tests_require,
py_modules=['pytest_pyramid_server', 'pyramid_server_test'],
entry_points=entry_points,
))
setup(**kwargs)
|
mit
|
jorik041/scrapy
|
scrapy/contrib/linkextractors/lxmlhtml.py
|
14
|
4121
|
"""
Link extractor based on lxml.html
"""
import re
from six.moves.urllib.parse import urlparse, urljoin
import lxml.etree as etree
from scrapy.selector import Selector
from scrapy.link import Link
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.python import unique as unique_list, str_to_unicode
from scrapy.linkextractor import FilteringLinkExtractor
from scrapy.utils.response import get_base_url
# from lxml/src/lxml/html/__init__.py
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
_collect_string_content = etree.XPath("string()")
def _nons(tag):
if isinstance(tag, basestring):
if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE:
return tag.split('}')[-1]
return tag
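# Illustration: _nons('{http://www.w3.org/1999/xhtml}a') -> 'a', while a tag
# outside the XHTML namespace, e.g. 'div', is returned unchanged.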
class LxmlParserLinkExtractor(object):
def __init__(self, tag="a", attr="href", process=None, unique=False):
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
def _iter_links(self, document):
for el in document.iter(etree.Element):
if not self.scan_tag(_nons(el.tag)):
continue
attribs = el.attrib
for attrib in attribs:
if not self.scan_attr(attrib):
continue
yield (el, attrib, attribs[attrib])
def _extract_links(self, selector, response_url, response_encoding, base_url):
links = []
# hacky way to get the underlying lxml parsed document
for el, attr, attr_val in self._iter_links(selector._root):
# pseudo lxml.html.HtmlElement.make_links_absolute(base_url)
attr_val = urljoin(base_url, attr_val)
url = self.process_attr(attr_val)
if url is None:
continue
if isinstance(url, unicode):
url = url.encode(response_encoding)
# to fix relative links after process_value
url = urljoin(response_url, url)
link = Link(url, _collect_string_content(el) or u'',
nofollow=True if el.get('rel') == 'nofollow' else False)
links.append(link)
return unique_list(links, key=lambda link: link.url) \
if self.unique else links
def extract_links(self, response):
html = Selector(response)
base_url = get_base_url(response)
return self._extract_links(html, response.url, response.encoding, base_url)
def _process_links(self, links):
""" Normalize and filter extracted links
        The subclass should override it if necessary
"""
links = unique_list(links, key=lambda link: link.url) if self.unique else links
return links
class LxmlLinkExtractor(FilteringLinkExtractor):
def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True, process_value=None,
deny_extensions=None):
tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
tag_func = lambda x: x in tags
attr_func = lambda x: x in attrs
lx = LxmlParserLinkExtractor(tag=tag_func, attr=attr_func,
unique=unique, process=process_value)
super(LxmlLinkExtractor, self).__init__(lx, allow, deny,
allow_domains, deny_domains, restrict_xpaths, canonicalize,
deny_extensions)
def extract_links(self, response):
html = Selector(response)
base_url = get_base_url(response)
if self.restrict_xpaths:
docs = [subdoc
for x in self.restrict_xpaths
for subdoc in html.xpath(x)]
else:
docs = [html]
all_links = []
for doc in docs:
links = self._extract_links(doc, response.url, response.encoding, base_url)
all_links.extend(self._process_links(links))
return unique_list(all_links)
|
bsd-3-clause
|
aperigault/ansible
|
lib/ansible/modules/cloud/cloudstack/cs_portforward.py
|
38
|
12535
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_portforward
short_description: Manages port forwarding rules on Apache CloudStack based clouds.
description:
- Create, update and remove port forwarding rules.
version_added: '2.0'
author: René Moser (@resmo)
options:
ip_address:
description:
- Public IP address the rule is assigned to.
type: str
required: true
vm:
description:
      - Name of the virtual machine the port forwarding rule is created for.
- Required if I(state=present).
type: str
state:
description:
- State of the port forwarding rule.
type: str
default: present
choices: [ present, absent ]
protocol:
description:
- Protocol of the port forwarding rule.
type: str
default: tcp
choices: [ tcp, udp ]
public_port:
description:
- Start public port for this rule.
type: int
required: true
public_end_port:
description:
- End public port for this rule.
      - If not specified, equal to I(public_port).
type: int
private_port:
description:
- Start private port for this rule.
type: int
required: true
private_end_port:
description:
- End private port for this rule.
      - If not specified, equal to I(private_port).
type: int
open_firewall:
description:
- Whether the firewall rule for public port should be created, while creating the new rule.
- Use M(cs_firewall) for managing firewall rules.
default: no
type: bool
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the port forwarding rule.
type: str
network:
description:
- Name of the network.
type: str
version_added: '2.3'
vpc:
description:
- Name of the VPC.
version_added: '2.3'
type: str
domain:
description:
- Domain the I(vm) is related to.
type: str
account:
description:
- Account the I(vm) is related to.
type: str
project:
description:
- Name of the project the I(vm) is located in.
type: str
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
type: str
poll_async:
description:
- Poll async jobs until job has finished.
default: yes
type: bool
tags:
description:
- List of tags. Tags are a list of dictionaries having keys I(key) and I(value).
- "To delete all tags, set a empty list e.g. I(tags: [])."
type: list
aliases: [ tag ]
version_added: '2.4'
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: 1.2.3.4:80 -> web01:8080
cs_portforward:
ip_address: 1.2.3.4
vm: web01
public_port: 80
private_port: 8080
delegate_to: localhost
- name: forward SSH and open firewall
cs_portforward:
ip_address: '{{ public_ip }}'
vm: '{{ inventory_hostname }}'
public_port: '{{ ansible_ssh_port }}'
private_port: 22
open_firewall: true
delegate_to: localhost
- name: forward DNS traffic, but do not open firewall
cs_portforward:
ip_address: 1.2.3.4
vm: '{{ inventory_hostname }}'
public_port: 53
private_port: 53
protocol: udp
delegate_to: localhost
- name: remove ssh port forwarding
cs_portforward:
ip_address: 1.2.3.4
public_port: 22
private_port: 22
state: absent
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the public IP address.
returned: success
type: str
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: str
sample: 1.2.3.4
protocol:
description: Protocol.
returned: success
type: str
sample: tcp
private_port:
description: Start port on the virtual machine's IP address.
returned: success
type: int
sample: 80
private_end_port:
description: End port on the virtual machine's IP address.
returned: success
type: int
sample: 80
public_port:
description: Start port on the public IP address.
returned: success
type: int
sample: 80
public_end_port:
description: End port on the public IP address.
returned: success
type: int
sample: 80
tags:
description: Tags related to the port forwarding.
returned: success
type: list
sample: []
vm_name:
description: Name of the virtual machine.
returned: success
type: str
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: str
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: str
sample: 10.101.65.152
vpc:
description: Name of the VPC.
returned: success
type: str
sample: my_vpc
network:
description: Name of the network.
returned: success
type: str
sample: dmz
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together
class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackPortforwarding, self).__init__(module)
self.returns = {
'virtualmachinedisplayname': 'vm_display_name',
'virtualmachinename': 'vm_name',
'ipaddress': 'ip_address',
'vmguestip': 'vm_guest_ip',
'publicip': 'public_ip',
'protocol': 'protocol',
}
# these values will be cast to int
self.returns_to_int = {
'publicport': 'public_port',
'publicendport': 'public_end_port',
'privateport': 'private_port',
'privateendport': 'private_end_port',
}
self.portforwarding_rule = None
def get_portforwarding_rule(self):
if not self.portforwarding_rule:
protocol = self.module.params.get('protocol')
public_port = self.module.params.get('public_port')
args = {
'ipaddressid': self.get_ip_address(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
}
portforwarding_rules = self.query_api('listPortForwardingRules', **args)
if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
for rule in portforwarding_rules['portforwardingrule']:
if (protocol == rule['protocol'] and
public_port == int(rule['publicport'])):
self.portforwarding_rule = rule
break
return self.portforwarding_rule
def present_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
else:
portforwarding_rule = self.create_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.ensure_tags(resource=portforwarding_rule, resource_type='PortForwardingRule')
self.portforwarding_rule = portforwarding_rule
return portforwarding_rule
def create_portforwarding_rule(self):
args = {
'protocol': self.module.params.get('protocol'),
'publicport': self.module.params.get('public_port'),
'publicendport': self.get_or_fallback('public_end_port', 'public_port'),
'privateport': self.module.params.get('private_port'),
'privateendport': self.get_or_fallback('private_end_port', 'private_port'),
'openfirewall': self.module.params.get('open_firewall'),
'vmguestip': self.get_vm_guest_ip(),
'ipaddressid': self.get_ip_address(key='id'),
'virtualmachineid': self.get_vm(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'networkid': self.get_network(key='id'),
}
portforwarding_rule = None
self.result['changed'] = True
if not self.module.check_mode:
portforwarding_rule = self.query_api('createPortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def update_portforwarding_rule(self, portforwarding_rule):
args = {
'protocol': self.module.params.get('protocol'),
'publicport': self.module.params.get('public_port'),
'publicendport': self.get_or_fallback('public_end_port', 'public_port'),
'privateport': self.module.params.get('private_port'),
'privateendport': self.get_or_fallback('private_end_port', 'private_port'),
'vmguestip': self.get_vm_guest_ip(),
'ipaddressid': self.get_ip_address(key='id'),
'virtualmachineid': self.get_vm(key='id'),
'networkid': self.get_network(key='id'),
}
if self.has_changed(args, portforwarding_rule):
self.result['changed'] = True
if not self.module.check_mode:
# API broken in 4.2.1? Workaround: use remove/create instead of update
# portforwarding_rule = self.query_api('updatePortForwardingRule', **args)
self.absent_portforwarding_rule()
portforwarding_rule = self.query_api('createPortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def absent_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
self.result['changed'] = True
args = {
'id': portforwarding_rule['id'],
}
if not self.module.check_mode:
res = self.query_api('deletePortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'portforwardingrule')
return portforwarding_rule
def get_result(self, portforwarding_rule):
super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
if portforwarding_rule:
for search_key, return_key in self.returns_to_int.items():
if search_key in portforwarding_rule:
self.result[return_key] = int(portforwarding_rule[search_key])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address=dict(required=True),
protocol=dict(choices=['tcp', 'udp'], default='tcp'),
public_port=dict(type='int', required=True),
public_end_port=dict(type='int'),
private_port=dict(type='int', required=True),
private_end_port=dict(type='int'),
state=dict(choices=['present', 'absent'], default='present'),
open_firewall=dict(type='bool', default=False),
vm_guest_ip=dict(),
vm=dict(),
vpc=dict(),
network=dict(),
zone=dict(),
domain=dict(),
account=dict(),
project=dict(),
poll_async=dict(type='bool', default=True),
tags=dict(type='list', aliases=['tag']),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_pf = AnsibleCloudStackPortforwarding(module)
state = module.params.get('state')
if state in ['absent']:
pf_rule = acs_pf.absent_portforwarding_rule()
else:
pf_rule = acs_pf.present_portforwarding_rule()
result = acs_pf.get_result(pf_rule)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
raghavs1108/DataPlotter
|
pyqtgraph/util/cprint.py
|
38
|
3028
|
"""
Cross-platform color text printing
Based on colorama (see pyqtgraph/util/colorama/README.txt)
"""
import sys, re
from .colorama.winterm import WinTerm, WinColor, WinStyle
from .colorama.win32 import windll
from ..python2_3 import basestring
_WIN = sys.platform.startswith('win')
if windll is not None:
winterm = WinTerm()
else:
_WIN = False
def winset(reset=False, fore=None, back=None, style=None, stderr=False):
if reset:
winterm.reset_all()
if fore is not None:
winterm.fore(fore, stderr)
if back is not None:
winterm.back(back, stderr)
if style is not None:
winterm.style(style, stderr)
ANSI = {}
WIN = {}
for i,color in enumerate(['BLACK', 'RED', 'GREEN', 'YELLOW', 'BLUE', 'MAGENTA', 'CYAN', 'WHITE']):
globals()[color] = i
globals()['BR_' + color] = i + 8
globals()['BACK_' + color] = i + 40
ANSI[i] = "\033[%dm" % (30+i)
ANSI[i+8] = "\033[2;%dm" % (30+i)
ANSI[i+40] = "\033[%dm" % (40+i)
color = 'GREY' if color == 'WHITE' else color
WIN[i] = {'fore': getattr(WinColor, color), 'style': WinStyle.NORMAL}
WIN[i+8] = {'fore': getattr(WinColor, color), 'style': WinStyle.BRIGHT}
WIN[i+40] = {'back': getattr(WinColor, color)}
RESET = -1
ANSI[RESET] = "\033[0m"
WIN[RESET] = {'reset': True}
def cprint(stream, *args, **kwds):
"""
Print with color. Examples::
# colors are BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE
cprint('stdout', RED, 'This is in red. ', RESET, 'and this is normal\n')
# Adding BR_ before the color makes it bright
cprint('stdout', BR_GREEN, 'This is bright green.\n', RESET)
# Adding BACK_ changes background color
cprint('stderr', BACK_BLUE, WHITE, 'This is white-on-blue.', -1)
# Integers 0-7 for normal, 8-15 for bright, and 40-47 for background.
# -1 to reset.
cprint('stderr', 1, 'This is in red.', -1)
"""
if isinstance(stream, basestring):
stream = kwds.get('stream', 'stdout')
err = stream == 'stderr'
stream = getattr(sys, stream)
else:
err = kwds.get('stderr', False)
if hasattr(stream, 'isatty') and stream.isatty():
if _WIN:
# convert to win32 calls
for arg in args:
if isinstance(arg, basestring):
stream.write(arg)
else:
kwds = WIN[arg]
winset(stderr=err, **kwds)
else:
# convert to ANSI
for arg in args:
if isinstance(arg, basestring):
stream.write(arg)
else:
stream.write(ANSI[arg])
else:
# ignore colors
for arg in args:
if isinstance(arg, basestring):
stream.write(arg)
def cout(*args):
"""Shorthand for cprint('stdout', ...)"""
cprint('stdout', *args)
def cerr(*args):
"""Shorthand for cprint('stderr', ...)"""
cprint('stderr', *args)
|
mit
|
robovm/robovm-studio
|
python/testData/inspections/PyTypeCheckerInspection/Generator.py
|
25
|
3013
|
def test():
def gen(n):
for x in xrange(n):
yield str(x)
def f_1(xs):
"""
:type xs: list of int
"""
return xs
def f_2(xs):
"""
:type xs: collections.Sequence of int
"""
return xs
def f_3(xs):
"""
:type xs: collections.Container of int
"""
return xs
def f_4(xs):
"""
:type xs: collections.Iterator of int
"""
return xs
def f_5(xs):
"""
:type xs: collections.Iterable of int
"""
return xs
def f_6(xs):
"""
:type xs: list
"""
return xs
def f_7(xs):
"""
:type xs: collections.Sequence
"""
return xs
def f_8(xs):
"""
:type xs: collections.Container
"""
return xs
def f_9(xs):
"""
:type xs: collections.Iterator
"""
return xs
def f_10(xs):
"""
:type xs: collections.Iterable
"""
return xs
def f_11(xs):
"""
:type xs: list of string
"""
return xs
def f_12(xs):
"""
:type xs: collections.Sequence of string
"""
return xs
def f_13(xs):
"""
:type xs: collections.Container of string
"""
return xs
def f_14(xs):
"""
:type xs: collections.Iterator of string
"""
return xs
def f_15(xs):
"""
:type xs: collections.Iterable of string
"""
return xs
return [
''.join(gen(10)),
f_1(<warning descr="Expected type 'list[int]', got '__generator[str]' instead">gen(11)</warning>),
f_2(<warning descr="Expected type 'Sequence[int]', got '__generator[str]' instead">gen(11)</warning>),
f_3(<warning descr="Expected type 'Container[int]', got '__generator[str]' instead">gen(11)</warning>),
f_4(<warning descr="Expected type 'Iterator[int]', got '__generator[str]' instead">gen(11)</warning>),
f_5(<warning descr="Expected type 'Iterable[int]', got '__generator[str]' instead">gen(11)</warning>),
f_6(<warning descr="Expected type 'list', got '__generator[str]' instead">gen(11)</warning>),
f_7(<warning descr="Expected type 'Sequence', got '__generator[str]' instead">gen(11)</warning>),
f_8(<warning descr="Expected type 'Container', got '__generator[str]' instead">gen(11)</warning>),
f_9(gen(11)),
f_10(gen(11)),
f_11(<warning descr="Expected type 'list[Union[str, unicode]]', got '__generator[str]' instead">gen(11)</warning>),
f_12(<warning descr="Expected type 'Sequence[Union[str, unicode]]', got '__generator[str]' instead">gen(11)</warning>),
f_13(<warning descr="Expected type 'Container[Union[str, unicode]]', got '__generator[str]' instead">gen(11)</warning>),
f_14(gen(11)),
f_15(gen(11)),
f_15('foo'.split('o')),
]
|
apache-2.0
|
gslate-cm9/android_kernel_lge_v909
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
5411
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
gpl-2.0
|
mleinart/graphite-web
|
webapp/graphite/remote_storage.py
|
8
|
8672
|
import socket
import time
import httplib
from urllib import urlencode
from threading import Lock, Event
from django.conf import settings
from django.core.cache import cache
from graphite.node import LeafNode, BranchNode
from graphite.intervals import Interval, IntervalSet
from graphite.readers import FetchInProgress
from graphite.logger import log
try:
import cPickle as pickle
except ImportError:
import pickle
class RemoteStore(object):
lastFailure = 0.0
available = property(lambda self: time.time() - self.lastFailure > settings.REMOTE_RETRY_DELAY)
def __init__(self, host):
self.host = host
def find(self, query):
request = FindRequest(self, query)
request.send()
return request
def fail(self):
self.lastFailure = time.time()
class FindRequest(object):
__slots__ = ('store', 'query', 'connection',
'failed', 'cacheKey', 'cachedResult')
def __init__(self, store, query):
self.store = store
self.query = query
self.connection = None
self.failed = False
if query.startTime:
start = query.startTime - (query.startTime % settings.FIND_CACHE_DURATION)
else:
start = ""
if query.endTime:
end = query.endTime - (query.endTime % settings.FIND_CACHE_DURATION)
else:
end = ""
self.cacheKey = "find:%s:%s:%s:%s" % (store.host, query.pattern, start, end)
self.cachedResult = None
def send(self):
log.info("FindRequest.send(host=%s, query=%s) called" % (self.store.host, self.query))
self.cachedResult = cache.get(self.cacheKey)
if self.cachedResult is not None:
log.info("FindRequest(host=%s, query=%s) using cached result" % (self.store.host, self.query))
return
self.connection = HTTPConnectionWithTimeout(self.store.host)
self.connection.timeout = settings.REMOTE_FIND_TIMEOUT
query_params = [
('local', '1'),
('format', 'pickle'),
('query', self.query.pattern),
]
if self.query.startTime:
query_params.append( ('from', self.query.startTime) )
if self.query.endTime:
query_params.append( ('until', self.query.endTime) )
query_string = urlencode(query_params)
try:
self.connection.request('GET', '/metrics/find/?' + query_string)
except:
log.exception("FindRequest.send(host=%s, query=%s) exception during request" % (self.store.host, self.query))
self.store.fail()
self.failed = True
def get_results(self):
if self.failed:
return
if self.cachedResult is not None:
results = self.cachedResult
else:
if self.connection is None:
self.send()
try:
response = self.connection.getresponse()
assert response.status == 200, "received error response %s - %s" % (response.status, response.reason)
result_data = response.read()
results = pickle.loads(result_data)
except:
log.exception("FindRequest.get_results(host=%s, query=%s) exception processing response" % (self.store.host, self.query))
self.store.fail()
return
cache.set(self.cacheKey, results, settings.FIND_CACHE_DURATION)
for node_info in results:
if node_info.get('is_leaf'):
reader = RemoteReader(self.store, node_info, bulk_query=self.query.pattern)
node = LeafNode(node_info['path'], reader)
else:
node = BranchNode(node_info['path'])
node.local = False
yield node
class RemoteReader(object):
__slots__ = ('store', 'metric_path', 'intervals', 'query')
cache_lock = Lock()
request_cache = {}
request_locks = {}
request_times = {}
def __init__(self, store, node_info, bulk_query=None):
self.store = store
self.metric_path = node_info['path']
self.intervals = node_info['intervals']
self.query = bulk_query or node_info['path']
def __repr__(self):
return '<RemoteReader[%x]: %s>' % (id(self), self.store.host)
def get_intervals(self):
return self.intervals
def fetch(self, startTime, endTime):
query_params = [
('target', self.query),
('format', 'pickle'),
('local', '1'),
('noCache', '1'),
('from', str( int(startTime) )),
('until', str( int(endTime) ))
]
query_string = urlencode(query_params)
urlpath = '/render/?' + query_string
url = "http://%s%s" % (self.store.host, urlpath)
# Quick cache check up front
self.clean_cache()
cached_results = self.request_cache.get(url)
if cached_results:
for series in cached_results:
if series['name'] == self.metric_path:
time_info = (series['start'], series['end'], series['step'])
return (time_info, series['values'])
# Synchronize with other RemoteReaders using the same bulk query.
# Despite our use of thread synchronization primitives, the common
# case is for synchronizing asynchronous fetch operations within
# a single thread.
(request_lock, wait_lock, completion_event) = self.get_request_locks(url)
if request_lock.acquire(False): # we only send the request the first time we're called
try:
log.info("RemoteReader.request_data :: requesting %s" % url)
connection = HTTPConnectionWithTimeout(self.store.host)
connection.timeout = settings.REMOTE_FETCH_TIMEOUT
connection.request('GET', urlpath)
except:
completion_event.set()
self.store.fail()
log.exception("Error requesting %s" % url)
raise
def wait_for_results():
if wait_lock.acquire(False): # the FetchInProgress that gets waited on waits for the actual completion
try:
response = connection.getresponse()
if response.status != 200:
raise Exception("Error response %d %s from %s" % (response.status, response.reason, url))
pickled_response = response.read()
results = pickle.loads(pickled_response)
self.cache_lock.acquire()
self.request_cache[url] = results
self.cache_lock.release()
completion_event.set()
return results
except:
completion_event.set()
self.store.fail()
log.exception("Error requesting %s" % url)
raise
else: # otherwise we just wait on the completion_event
completion_event.wait(settings.REMOTE_FETCH_TIMEOUT)
cached_results = self.request_cache.get(url)
if cached_results is None:
raise Exception("Passive remote fetch failed to find cached results")
else:
return cached_results
def extract_my_results():
for series in wait_for_results():
if series['name'] == self.metric_path:
time_info = (series['start'], series['end'], series['step'])
return (time_info, series['values'])
return FetchInProgress(extract_my_results)
def clean_cache(self):
self.cache_lock.acquire()
try:
if len(self.request_locks) >= settings.REMOTE_READER_CACHE_SIZE_LIMIT:
log.info("RemoteReader.request_data :: clearing old from request_cache and request_locks")
now = time.time()
for url, timestamp in self.request_times.items():
age = now - timestamp
if age >= (2 * settings.REMOTE_FETCH_TIMEOUT):
del self.request_locks[url]
del self.request_times[url]
if url in self.request_cache:
del self.request_cache[url]
finally:
self.cache_lock.release()
def get_request_locks(self, url):
self.cache_lock.acquire()
try:
if url not in self.request_locks:
self.request_locks[url] = (Lock(), Lock(), Event())
self.request_times[url] = time.time()
return self.request_locks[url]
finally:
self.cache_lock.release()
# This is a hack to put a timeout in the connect() of an HTTP request.
# Python 2.6 supports this already, but many Graphite installations
# are not on 2.6 yet.
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
timeout = 30
def connect(self):
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
try:
self.sock.settimeout( float(self.timeout) ) # default self.timeout is an object() in 2.6
except:
pass
self.sock.connect(sa)
self.sock.settimeout(None)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
|
apache-2.0
|
dwf/numpy
|
doc/sphinxext/docscrape.py
|
68
|
15425
|
"""Extract reference documentation from the NumPy source tree.
"""
import inspect
import textwrap
import re
import pydoc
from StringIO import StringIO
from warnings import warn
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data,list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l+1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self,n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
class NumpyDocString(object):
def __init__(self, docstring, config={}):
docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': [''],
'Extended Summary': [],
'Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
'Attributes': [],
'Methods': [],
'See Also': [],
'Notes': [],
'Warnings': [],
'References': '',
'Examples': '',
'index': {}
}
self._parse()
def __getitem__(self,key):
return self._parsed_data[key]
def __setitem__(self,key,val):
if not self._parsed_data.has_key(key):
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
def _strip(self,doc):
i = 0
j = 0
for i,line in enumerate(doc):
if line.strip(): break
for j,line in enumerate(doc[::-1]):
if line.strip(): break
return doc[i:len(doc)-j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self,content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
params.append((arg_name,arg_type,desc))
return params
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip(): continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
return items
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
self['Summary'] = self._doc.read_to_next_empty_line()
else:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
for (section,content) in self._read_sections():
if not section.startswith('..'):
section = ' '.join([s.capitalize() for s in section.split(' ')])
if section in ('Parameters', 'Returns', 'Raises', 'Warns',
'Other Parameters', 'Attributes', 'Methods'):
self[section] = self._parse_param_list(content)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
self['See Also'] = self._parse_see_also(content)
else:
self[section] = content
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name)*symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
if self['Signature']:
return [self['Signature'].replace('*','\*')] + ['']
else:
return ['']
def _str_summary(self):
if self['Summary']:
return self['Summary'] + ['']
else:
return []
def _str_extended_summary(self):
if self['Extended Summary']:
return self['Extended Summary'] + ['']
else:
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param,param_type,desc in self[name]:
out += ['%s : %s' % (param, param_type)]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self, func_role):
if not self['See Also']: return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc, role in self['See Also']:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
out += ['']
out += [link]
else:
out[-1] += ", %s" % link
if desc:
out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, func_role=''):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
out += self._str_see_also(func_role)
for s in ('Notes','References','Examples'):
out += self._str_section(s)
for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
out += self._str_index()
return '\n'.join(out)
def indent(str,indent=4):
indent_str = ' '*indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
def header(text, style='-'):
return text + '\n' + style*len(text) + '\n'
class FunctionDoc(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ''
NumpyDocString.__init__(self, doc)
if not self['Signature'] and func is not None:
func, func_name = self.get_func()
try:
# try to read signature
argspec = inspect.getargspec(func)
argspec = inspect.formatargspec(*argspec)
argspec = argspec.replace('*','\*')
signature = '%s%s' % (func_name, argspec)
except TypeError, e:
signature = '%s()' % func_name
self['Signature'] = signature
def get_func(self):
func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ''
func, func_name = self.get_func()
signature = self['Signature'].replace('*', '\*')
roles = {'func': 'function',
'meth': 'method'}
if self._role:
if not roles.has_key(self._role):
print "Warning: invalid role %s" % self._role
out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
func_name)
out += super(FunctionDoc, self).__str__(func_role=self._role)
return out
class ClassDoc(NumpyDocString):
extra_public_methods = ['__call__']
def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
config={}):
if not inspect.isclass(cls) and cls is not None:
raise ValueError("Expected a class or None, but got %r" % cls)
self._cls = cls
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
if config.get('show_class_members', True):
if not self['Methods']:
self['Methods'] = [(name, '', '')
for name in sorted(self.methods)]
if not self['Attributes']:
self['Attributes'] = [(name, '', '')
for name in sorted(self.properties)]
@property
def methods(self):
if self._cls is None:
return []
return [name for name,func in inspect.getmembers(self._cls)
if ((not name.startswith('_')
or name in self.extra_public_methods)
and callable(func))]
@property
def properties(self):
if self._cls is None:
return []
return [name for name,func in inspect.getmembers(self._cls)
if not name.startswith('_') and func is None]
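# Illustrative usage sketch, added for clarity; it is not part of the original
# docscrape module. The tiny docstring below is a made-up example.
def _example_parse():
    """Parse a minimal NumPy-style docstring and return its Parameters section,
    which comes back as a list of (name, type, description-lines) tuples."""
    text = ("Add two numbers.\n"
            "\n"
            "Parameters\n"
            "----------\n"
            "a : int\n"
            "    First operand.\n"
            "b : int\n"
            "    Second operand.\n")
    return NumpyDocString(text)['Parameters']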
|
bsd-3-clause
|
sargas/scipy
|
scipy/signal/cont2discrete.py
|
1
|
5015
|
"""
Continuous to discrete transformations for state-space and transfer function.
"""
from __future__ import division, print_function, absolute_import
# Author: Jeffrey Armstrong <[email protected]>
# March 29, 2011
import numpy as np
from scipy import linalg
from .ltisys import tf2ss, ss2tf, zpk2ss, ss2zpk
__all__ = ['cont2discrete']
def cont2discrete(sys, dt, method="zoh", alpha=None):
"""
Transform a continuous to a discrete state-space system.
Parameters
----------
sys : a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
dt : float
The discretization time step.
method : {"gbt", "bilinear", "euler", "backward_diff", "zoh"}
Which method to use:
* gbt: generalized bilinear transformation
* bilinear: Tustin's approximation ("gbt" with alpha=0.5)
* euler: Euler (or forward differencing) method ("gbt" with alpha=0)
* backward_diff: Backwards differencing ("gbt" with alpha=1.0)
* zoh: zero-order hold (default)
alpha : float within [0, 1]
The generalized bilinear transformation weighting parameter, which
should only be specified with method="gbt", and is ignored otherwise
Returns
-------
sysd : tuple containing the discrete system
Based on the input type, the output will be of the form
* (num, den, dt) for transfer function input
* (zeros, poles, gain, dt) for zeros-poles-gain input
* (A, B, C, D, dt) for state-space system input
Notes
-----
By default, the routine uses a Zero-Order Hold (zoh) method to perform
the transformation. Alternatively, a generalized bilinear transformation
may be used, which includes the common Tustin's bilinear approximation,
an Euler's method technique, or a backwards differencing technique.
The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear
approximation is based on [2]_ and [3]_.
References
----------
.. [1] http://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models
.. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf
.. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized
bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754,
2009.
(http://www.ece.ualberta.ca/~gfzhang/research/ZCC07_preprint.pdf)
"""
if len(sys) == 2:
sysd = cont2discrete(tf2ss(sys[0], sys[1]), dt, method=method,
alpha=alpha)
return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
elif len(sys) == 3:
sysd = cont2discrete(zpk2ss(sys[0], sys[1], sys[2]), dt, method=method,
alpha=alpha)
return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
elif len(sys) == 4:
a, b, c, d = sys
else:
raise ValueError("First argument must either be a tuple of 2 (tf), "
"3 (zpk), or 4 (ss) arrays.")
if method == 'gbt':
if alpha is None:
raise ValueError("Alpha parameter must be specified for the "
"generalized bilinear transform (gbt) method")
elif alpha < 0 or alpha > 1:
raise ValueError("Alpha parameter must be within the interval "
"[0,1] for the gbt method")
if method == 'gbt':
# This parameter is used repeatedly - compute once here
ima = np.eye(a.shape[0]) - alpha*dt*a
ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a)
bd = linalg.solve(ima, dt*b)
# Similarly solve for the output equation matrices
cd = linalg.solve(ima.transpose(), c.transpose())
cd = cd.transpose()
dd = d + alpha*np.dot(c, bd)
elif method == 'bilinear' or method == 'tustin':
return cont2discrete(sys, dt, method="gbt", alpha=0.5)
elif method == 'euler' or method == 'forward_diff':
return cont2discrete(sys, dt, method="gbt", alpha=0.0)
elif method == 'backward_diff':
return cont2discrete(sys, dt, method="gbt", alpha=1.0)
elif method == 'zoh':
# Build an exponential matrix
em_upper = np.hstack((a, b))
# Need to stack zeros under the a and b matrices
em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])),
np.zeros((b.shape[1], b.shape[1])) ))
em = np.vstack((em_upper, em_lower))
ms = linalg.expm(dt * em)
# Dispose of the lower rows
ms = ms[:a.shape[0], :]
ad = ms[:, 0:a.shape[1]]
bd = ms[:, a.shape[1]:]
cd = c
dd = d
else:
raise ValueError("Unknown transformation method '%s'" % method)
return ad, bd, cd, dd, dt
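# Illustrative usage sketch, added for clarity; it is not part of the original
# SciPy module. The filter and step size below are made-up example values.
def _example_zoh_discretization():
    """Discretize the first-order low-pass filter H(s) = 1 / (s + 1) with a
    10 ms step using the default zero-order hold, returning the discrete
    transfer-function coefficients and the sampling time."""
    num_d, den_d, dt = cont2discrete(([1.0], [1.0, 1.0]), 0.01)
    return num_d, den_d, dt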
|
bsd-3-clause
|
RobertWWong/WebDev
|
djangoApp/ENV/lib/python3.5/site-packages/django/contrib/auth/tokens.py
|
45
|
2862
|
from datetime import date
from django.conf import settings
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.http import base36_to_int, int_to_base36
class PasswordResetTokenGenerator(object):
"""
Strategy object used to generate and check tokens for the password
reset mechanism.
"""
key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
def make_token(self, user):
"""
Returns a token that can be used once to do a password reset
for the given user.
"""
return self._make_token_with_timestamp(user, self._num_days(self._today()))
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
if not (user and token):
return False
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, timestamp):
# timestamp is number of days since 2001-1-1. Converted to
# base 36, this gives us a 3 digit string until about 2121
ts_b36 = int_to_base36(timestamp)
# By hashing on the internal state of the user and using state
# that is sure to change (the password salt will change as soon as
# the password is set, at least for current Django auth, and
# last_login will also change), we produce a hash that will be
# invalid as soon as it is used.
# We limit the hash to 20 chars to keep URL short
hash = salted_hmac(
self.key_salt,
self._make_hash_value(user, timestamp),
).hexdigest()[::2]
return "%s-%s" % (ts_b36, hash)
def _make_hash_value(self, user, timestamp):
# Ensure results are consistent across DB backends
login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None)
return (
six.text_type(user.pk) + user.password +
six.text_type(login_timestamp) + six.text_type(timestamp)
)
def _num_days(self, dt):
return (dt - date(2001, 1, 1)).days
def _today(self):
# Used for mocking in tests
return date.today()
default_token_generator = PasswordResetTokenGenerator()
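# Illustrative usage sketch, added for clarity; it is not part of the original
# Django source. `user` is assumed to be a saved User instance in a configured
# Django project.
def _example_reset_round_trip(user):
    """Typical make_token / check_token round trip: the token encodes the day
    count since 2001-01-01 plus an HMAC over the user's state, so it stops
    validating once the password or last_login changes, or the timeout passes."""
    token = default_token_generator.make_token(user)
    return default_token_generator.check_token(user, token)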
|
mit
|
maleficarium/youtube-dl
|
test/test_write_annotations.py
|
18
|
2547
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import get_params, try_rm
import io
import xml.etree.ElementTree
import youtube_dl.YoutubeDL
import youtube_dl.extractor
class YoutubeDL(youtube_dl.YoutubeDL):
def __init__(self, *args, **kwargs):
super(YoutubeDL, self).__init__(*args, **kwargs)
self.to_stderr = self.to_screen
params = get_params({
'writeannotations': True,
'skip_download': True,
'writeinfojson': False,
'format': 'flv',
})
TEST_ID = 'gr51aVj-mLg'
ANNOTATIONS_FILE = TEST_ID + '.annotations.xml'
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
class TestAnnotations(unittest.TestCase):
def setUp(self):
# Clear old files
self.tearDown()
def test_info_json(self):
expected = list(EXPECTED_ANNOTATIONS) # Two annotations could have the same text.
ie = youtube_dl.extractor.YoutubeIE()
ydl = YoutubeDL(params)
ydl.add_info_extractor(ie)
ydl.download([TEST_ID])
self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
annoxml = None
with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
annoxml = xml.etree.ElementTree.parse(annof)
self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
root = annoxml.getroot()
self.assertEqual(root.tag, 'document')
annotationsTag = root.find('annotations')
self.assertEqual(annotationsTag.tag, 'annotations')
annotations = annotationsTag.findall('annotation')
# Not all the annotations have TEXT children and the annotations are returned unsorted.
for a in annotations:
self.assertEqual(a.tag, 'annotation')
if a.get('type') == 'text':
textTag = a.find('TEXT')
text = textTag.text
self.assertTrue(text in expected) # assertIn only added in python 2.7
# remove the first occurrence, there could be more than one annotation with the same text
expected.remove(text)
# We should have seen (and removed) all the expected annotation texts.
self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
def tearDown(self):
try_rm(ANNOTATIONS_FILE)
if __name__ == '__main__':
unittest.main()
|
unlicense
|
TaliesinSkye/evennia
|
contrib/procpools/ampoule/pool.py
|
6
|
15205
|
import time
import random
import heapq
import itertools
import signal
choice = random.choice
now = time.time
count = itertools.count().next
pop = heapq.heappop
from twisted.internet import defer, task, error
from twisted.python import log, failure
from contrib.procpools.ampoule import commands, main
try:
DIE = signal.SIGKILL
except AttributeError:
# Windows doesn't have SIGKILL, let's just use SIGTERM then
DIE = signal.SIGTERM
class ProcessPool(object):
"""
This class generalizes the functionality of a pool of
processes to which work can be dispatched.
@ivar finished: Boolean flag, L{True} when the pool is finished.
@ivar started: Boolean flag, L{True} when the pool is started.
@ivar name: Optional name for the process pool
@ivar min: Minimum number of subprocesses to set up
@ivar max: Maximum number of subprocesses to set up
@ivar maxIdle: Maximum number of seconds of idleness in a child
@ivar starter: A process starter instance that provides
L{iampoule.IStarter}.
@ivar recycleAfter: Maximum number of calls before restarting a
subprocess, 0 to not recycle.
@ivar ampChild: The child AMP protocol subclass with the commands
that the child should implement.
@ivar ampParent: The parent AMP protocol subclass with the commands
that the parent should implement.
@ivar timeout: The general timeout (in seconds) for every child
process call.
"""
finished = False
started = False
name = None
def __init__(self, ampChild=None, ampParent=None, min=5, max=20,
name=None, maxIdle=20, recycleAfter=500, starter=None,
timeout=None, timeout_signal=DIE, ampChildArgs=()):
self.starter = starter
self.ampChildArgs = tuple(ampChildArgs)
if starter is None:
self.starter = main.ProcessStarter(packages=("twisted", "ampoule"))
self.ampParent = ampParent
self.ampChild = ampChild
if ampChild is None:
from contrib.procpools.ampoule.child import AMPChild
self.ampChild = AMPChild
self.min = min
self.max = max
self.name = name
self.maxIdle = maxIdle
self.recycleAfter = recycleAfter
self.timeout = timeout
self.timeout_signal = timeout_signal
self._queue = []
self.processes = set()
self.ready = set()
self.busy = set()
self._finishCallbacks = {}
self._lastUsage = {}
self._calls = {}
self.looping = task.LoopingCall(self._pruneProcesses)
self.looping.start(maxIdle, now=False)
def start(self, ampChild=None):
"""
Starts the ProcessPool with a given child protocol.
@param ampChild: a L{ampoule.child.AMPChild} subclass.
@type ampChild: L{ampoule.child.AMPChild} subclass
"""
if ampChild is not None and not self.started:
self.ampChild = ampChild
self.finished = False
self.started = True
return self.adjustPoolSize()
def _pruneProcesses(self):
"""
Remove idle processes from the pool.
"""
n = now()
d = []
for child, lastUse in self._lastUsage.iteritems():
if len(self.processes) > self.min and (n - lastUse) > self.maxIdle:
# we are setting lastUse when processing finishes, it
# might be processing right now
if child not in self.busy:
# we need to remove this child from the ready set
# and the processes set because otherwise it might
# get calls from doWork
self.ready.discard(child)
self.processes.discard(child)
d.append(self.stopAWorker(child))
return defer.DeferredList(d)
def _pruneProcess(self, child):
"""
Remove every trace of the process from this instance.
"""
self.processes.discard(child)
self.ready.discard(child)
self.busy.discard(child)
self._lastUsage.pop(child, None)
self._calls.pop(child, None)
self._finishCallbacks.pop(child, None)
def _addProcess(self, child, finished):
"""
Adds the newly created child process to the pool.
"""
def restart(child, reason):
#log.msg("FATAL: Restarting after %s" % (reason,))
self._pruneProcess(child)
return self.startAWorker()
def dieGently(data, child):
#log.msg("STOPPING: '%s'" % (data,))
self._pruneProcess(child)
self.processes.add(child)
self.ready.add(child)
finished.addCallback(dieGently, child
).addErrback(lambda reason: restart(child, reason))
self._finishCallbacks[child] = finished
self._lastUsage[child] = now()
self._calls[child] = 0
self._catchUp()
def _catchUp(self):
"""
If there are queued items in the list then run them.
"""
if self._queue:
_, (d, command, kwargs) = pop(self._queue)
self._cb_doWork(command, **kwargs).chainDeferred(d)
def _handleTimeout(self, child):
"""
One of the children timed out; we need to deal with it.
@param child: The child process
@type child: L{child.AMPChild}
"""
try:
child.transport.signalProcess(self.timeout_signal)
except error.ProcessExitedAlready:
# don't do anything then... we are too late
# or we were too early to call
pass
def startAWorker(self):
"""
Start a worker and set it up in the system.
"""
if self.finished:
# this is a race condition: basically if we call self.stop()
# while a process is being recycled what happens is that the
# process will be created anyway. By putting a check for
# self.finished here we make sure that we never create new
# processes once the pool has been stopped.
# The race condition comes from the fact that:
# stopAWorker() is asynchronous while stop() is synchronous.
# so if you call:
# pp.stopAWorker(child).addCallback(lambda _: pp.startAWorker())
# pp.stop()
# You might end up with a dirty reactor due to the stop()
# returning before the new process is created.
return
startAMPProcess = self.starter.startAMPProcess
child, finished = startAMPProcess(self.ampChild,
ampParent=self.ampParent,
ampChildArgs=self.ampChildArgs)
return self._addProcess(child, finished)
def _cb_doWork(self, command, _timeout=None, _deadline=None,
**kwargs):
"""
Go and call the command.
@param command: The L{amp.Command} to be executed in the child
@type command: L{amp.Command}
@param _d: The deferred for the calling code.
@type _d: L{defer.Deferred}
@param _timeout: The timeout for this call only
@type _timeout: C{int}
@param _deadline: The deadline for this call only
@type _deadline: C{int}
"""
timeoutCall = None
deadlineCall = None
def _returned(result, child, is_error=False):
def cancelCall(call):
if call is not None and call.active():
call.cancel()
cancelCall(timeoutCall)
cancelCall(deadlineCall)
self.busy.discard(child)
if not die:
# we are not marked to be removed, so add us back to
# the ready set and let's see if there's some catching
# up to do
self.ready.add(child)
self._catchUp()
else:
# We should die and we do, then we start a new worker
# to pick up stuff from the queue otherwise we end up
# without workers and the queue will remain there.
self.stopAWorker(child).addCallback(lambda _: self.startAWorker())
self._lastUsage[child] = now()
# we can't do recycling here because it's too late and
# the process might have received tons of calls already
# which would make it run more calls than what is
# configured to do.
return result
die = False
child = self.ready.pop()
self.busy.add(child)
self._calls[child] += 1
# Let's see if this call goes over the recycling barrier
if self.recycleAfter and self._calls[child] >= self.recycleAfter:
# it does so mark this child, using a closure, to be
# removed at the end of the call.
die = True
# If the command doesn't require a response then callRemote
# returns nothing, so we prepare for that too.
# We also need to guard against timeout errors for child
# and local timeout parameter overrides the global one
if _timeout == 0:
timeout = _timeout
else:
timeout = _timeout or self.timeout
if timeout is not None:
from twisted.internet import reactor
timeoutCall = reactor.callLater(timeout, self._handleTimeout, child)
if _deadline is not None:
from twisted.internet import reactor
delay = max(0, _deadline - reactor.seconds())
deadlineCall = reactor.callLater(delay, self._handleTimeout,
child)
return defer.maybeDeferred(child.callRemote, command, **kwargs
).addCallback(_returned, child
).addErrback(_returned, child, is_error=True)
def callRemote(self, *args, **kwargs):
"""
Proxy call to keep the API homogeneous across twisted's RPCs
"""
return self.doWork(*args, **kwargs)
def doWork(self, command, **kwargs):
"""
Sends the command to one child.
@param command: an L{amp.Command} type object.
@type command: L{amp.Command}
@param kwargs: dictionary containing the arguments for the command.
"""
if self.ready: # there are unused processes, let's use them
return self._cb_doWork(command, **kwargs)
else:
if len(self.processes) < self.max:
# no unused but we can start some new ones
# since startAWorker is synchronous we won't have a
# race condition here in case of multiple calls to
# doWork, so we will end up in the else clause in case
# of such calls:
# Process pool with min=1, max=1, recycle_after=1
# [call(Command) for x in xrange(BIG_NUMBER)]
self.startAWorker()
return self._cb_doWork(command, **kwargs)
else:
# No one is free... just queue up and wait for a process
# to start and pick up the first item in the queue.
d = defer.Deferred()
self._queue.append((count(), (d, command, kwargs)))
return d
def stopAWorker(self, child=None):
"""
Gently stop a child so that it's not restarted anymore
@param command: an L{ampoule.child.AmpChild} type object.
@type command: L{ampoule.child.AmpChild} or None
"""
if child is None:
if self.ready:
child = self.ready.pop()
else:
child = choice(list(self.processes))
child.callRemote(commands.Shutdown
# This is needed for timeout handling, the reason is pretty hard
# to explain but I'll try to:
# There's another small race condition in the system. If the
# child process is shut down by a signal and you try to stop
# the process pool immediately afterwards, like tests would do,
# the child AMP object would still be in the system and trying
# to call the command Shutdown on it would result in the same
# errback that we got originally, for this reason we need to
# trap it now so that it doesn't raise by not being handled.
# Does this even make sense to you?
).addErrback(lambda reason: reason.trap(error.ProcessTerminated))
return self._finishCallbacks[child]
def _startSomeWorkers(self):
"""
Start a bunch of workers until we reach the max number of them.
"""
if len(self.processes) < self.max:
self.startAWorker()
def adjustPoolSize(self, min=None, max=None):
"""
Change the pool size to be at least min and less than max,
useful when you change the values of max and min in the instance
and you want the pool to adapt to them.
"""
if min is None:
min = self.min
if max is None:
max = self.max
assert min >= 0, 'minimum is negative'
assert min <= max, 'minimum is greater than maximum'
self.min = min
self.max = max
l = []
if self.started:
for i in xrange(len(self.processes)-self.max):
l.append(self.stopAWorker())
while len(self.processes) < self.min:
self.startAWorker()
return defer.DeferredList(l)#.addCallback(lambda _: self.dumpStats())
def stop(self):
"""
Stops the process protocol.
"""
self.finished = True
l = [self.stopAWorker(process) for process in self.processes]
def _cb(_):
if self.looping.running:
self.looping.stop()
return defer.DeferredList(l).addCallback(_cb)
def dumpStats(self):
log.msg("ProcessPool stats:")
log.msg('\tworkers: %s' % len(self.processes))
log.msg('\ttimeout: %s' % (self.timeout))
log.msg('\tparent: %r' % (self.ampParent,))
log.msg('\tchild: %r' % (self.ampChild,))
log.msg('\tmax idle: %r' % (self.maxIdle,))
log.msg('\trecycle after: %r' % (self.recycleAfter,))
log.msg('\tProcessStarter:')
log.msg('\t\t%r' % (self.starter,))
pp = None
def deferToAMPProcess(command, **kwargs):
"""
Helper function that sends a command to the default process pool
and returns a deferred that fires when the result of the
subprocess computation is ready.
@param command: an L{amp.Command} subclass
@param kwargs: dictionary containing the arguments for the command.
@return: a L{defer.Deferred} with the data from the subprocess.
"""
global pp
if pp is None:
pp = ProcessPool()
return pp.start().addCallback(lambda _: pp.doWork(command, **kwargs))
return pp.doWork(command, **kwargs)
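# Illustrative usage sketch, added for clarity; it is not part of the original
# ampoule source. `command` is assumed to be an amp.Command subclass (for
# example a hypothetical `Sum` command with `a`/`b` arguments) implemented by
# the child AMP protocol.
def _example_dispatch(command, **kwargs):
    """Dispatch a single command to the default process pool and log the
    response dictionary once the child returns it."""
    d = deferToAMPProcess(command, **kwargs)
    d.addCallback(lambda response: log.msg("response: %r" % (response,)))
    return d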
|
bsd-3-clause
|
x13n/contrib
|
keepalived-vip/vendor/github.com/ugorji/go/codec/test.py
|
1138
|
3876
|
#!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to check in the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464646464.0,
False,
True,
None,
u"someday",
u"",
u"bytestring",
1328176922000002000,
-2206187877999998000,
270,
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": "True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": "1234567890" },
{ True: "true", 8: False, "false": 0 }
]
l = []
l.extend(l0)
l.append(l0)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
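As a quick, hedged sanity check of the golden values above, a serialize/decode round trip can be expressed with the same cbor calls this script already uses; the helper below is illustrative only and is not part of the original driver.

import cbor

def cbor_roundtrip_ok(value):
    # encode and decode again; a golden value should come back unchanged
    return cbor.loads(cbor.dumps(value)) == value

assert cbor_roundtrip_ok({"true": True, "list": [1616, -3232.0, None]})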
|
apache-2.0
|
FusionWorks/adam
|
bin/append_to_option.py
|
18
|
1630
|
#!/usr/bin/env python
#
# Licensed to Big Data Genomics (BDG) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The BDG licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Append text to an option.
Usage:
append_to_option.py DELIMITER OPTION APPEND_STRING OPTION_STRING
For example, running
append_to_option.py , --jars myproject.jar --option1 value1 --jars otherproject.jar --option2 value2
will write to stdout
--option1 value1 --jars otherproject.jar,myproject.jar --option2 value2
"""
import sys
delimiter = sys.argv[1]
target = sys.argv[2]
append = sys.argv[3]
original = sys.argv[4:]
if original.count(target) > 1:
sys.stderr.write("Found multiple %s in the option list." % target)
sys.exit(1)
if original.count(target) == 0:
original.extend([target, append])
else: # original.count(target) == 1
idx = original.index(target)
new_value = delimiter.join([original[idx + 1], append])
original[idx + 1] = new_value
sys.stdout.write(' '.join(original))
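A minimal sketch of the same rewriting as a reusable function, with the example from the docstring as a self-check; the function name is hypothetical and not part of the original script.

def append_to_option(delimiter, target, append, args):
    # pure-function version of the argv rewriting performed above
    args = list(args)
    if args.count(target) == 0:
        args.extend([target, append])
    elif args.count(target) == 1:
        idx = args.index(target)
        args[idx + 1] = delimiter.join([args[idx + 1], append])
    else:
        raise ValueError("Found multiple %s in the option list." % target)
    return ' '.join(args)

assert append_to_option(
    ',', '--jars', 'myproject.jar',
    ['--option1', 'value1', '--jars', 'otherproject.jar', '--option2', 'value2']
) == '--option1 value1 --jars otherproject.jar,myproject.jar --option2 value2'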
|
apache-2.0
|
apache-spark-on-k8s/spark
|
examples/src/main/python/pi.py
|
14
|
1468
|
from __future__ import print_function
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from random import random
from operator import add
from pyspark.sql import SparkSession
if __name__ == "__main__":
"""
Usage: pi [partitions]
"""
spark = SparkSession\
.builder\
.appName("PythonPi")\
.getOrCreate()
partitions = int(sys.argv[1]) if len(sys.argv) > 1 else 2
n = 100000 * partitions
def f(_):
x = random() * 2 - 1
y = random() * 2 - 1
return 1 if x ** 2 + y ** 2 <= 1 else 0
count = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)
print("Pi is roughly %f" % (4.0 * count / n))
spark.stop()
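For comparison, the same Monte Carlo estimate without Spark: a single-process sketch of the map/reduce above, illustrative only.

from random import random

def estimate_pi(n):
    # count samples that land inside the unit circle, exactly as f() does above
    inside = sum(1 for _ in range(n)
                 if (random() * 2 - 1) ** 2 + (random() * 2 - 1) ** 2 <= 1)
    return 4.0 * inside / n

print("Pi is roughly %f" % estimate_pi(100000))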
|
apache-2.0
|
fake-name/ReadableWebProxy
|
sqlalchemy_continuum_vendored/relationship_builder.py
|
2
|
12507
|
import sqlalchemy as sa
from .exc import ClassNotVersioned
from .expression_reflector import VersionExpressionReflector
from .operation import Operation
from .table_builder import TableBuilder
from .utils import adapt_columns, version_class, option
class RelationshipBuilder(object):
def __init__(self, versioning_manager, model, property_):
self.manager = versioning_manager
self.property = property_
self.model = model
def one_to_many_subquery(self, obj):
tx_column = option(obj, 'transaction_column_name')
remote_alias = sa.orm.aliased(self.remote_cls)
primary_keys = [
getattr(remote_alias, column.name) for column
in sa.inspect(remote_alias).mapper.columns
if column.primary_key and column.name != tx_column
]
return sa.exists(
sa.select(
[1]
).where(
sa.and_(
getattr(remote_alias, tx_column) <=
getattr(obj, tx_column),
*[
getattr(remote_alias, pk.name) ==
getattr(self.remote_cls, pk.name)
for pk in primary_keys
]
)
).group_by(
*primary_keys
).having(
sa.func.max(getattr(remote_alias, tx_column)) ==
getattr(self.remote_cls, tx_column)
).correlate(self.local_cls, self.remote_cls)
)
def many_to_one_subquery(self, obj):
tx_column = option(obj, 'transaction_column_name')
reflector = VersionExpressionReflector(obj, self.property)
return getattr(self.remote_cls, tx_column) == (
sa.select(
[sa.func.max(getattr(self.remote_cls, tx_column))]
).where(
sa.and_(
getattr(self.remote_cls, tx_column) <=
getattr(obj, tx_column),
reflector(self.property.primaryjoin)
)
)
)
def query(self, obj):
session = sa.orm.object_session(obj)
return (
session.query(self.remote_cls)
.filter(
self.criteria(obj)
)
)
def process_query(self, query):
"""
Process given SQLAlchemy Query object depending on the associated
RelationshipProperty object.
:param query: SQLAlchemy Query object
"""
if self.property.lazy == 'dynamic':
return query
if self.property.uselist is False:
return query.first()
return query.all()
def criteria(self, obj):
direction = self.property.direction
if self.versioned:
if direction.name == 'ONETOMANY':
return self.one_to_many_criteria(obj)
elif direction.name == 'MANYTOMANY':
return self.many_to_many_criteria(obj)
elif direction.name == 'MANYTOONE':
return self.many_to_one_criteria(obj)
else:
reflector = VersionExpressionReflector(obj, self.property)
return reflector(self.property.primaryjoin)
def many_to_many_criteria(self, obj):
"""
Returns the many-to-many query.
        Looks up remote items through associations and for each item returns
        the last version with a transaction less than or equal to the
transaction of `obj`. This must hold true for both the association and
the remote relation items.
Example
-------
Select all tags of article with id 3 and transaction 5
.. code-block:: sql
SELECT tags_version.*
FROM tags_version
WHERE EXISTS (
SELECT 1
FROM article_tag_version
WHERE article_id = 3
AND tag_id = tags_version.id
AND operation_type != 2
AND EXISTS (
SELECT 1
FROM article_tag_version as article_tag_version2
WHERE article_tag_version2.tag_id = article_tag_version.tag_id
AND article_tag_version2.tx_id <= 5
GROUP BY article_tag_version2.tag_id
HAVING
MAX(article_tag_version2.tx_id) =
article_tag_version.tx_id
)
)
AND EXISTS (
SELECT 1
FROM tags_version as tags_version_2
WHERE tags_version_2.id = tags_version.id
AND tags_version_2.tx_id <= 5
GROUP BY tags_version_2.id
HAVING MAX(tags_version_2.tx_id) = tags_version.tx_id
)
AND operation_type != 2
"""
return sa.and_(
self.association_subquery(obj),
self.one_to_many_subquery(obj),
self.remote_cls.operation_type != Operation.DELETE
)
def many_to_one_criteria(self, obj):
"""Returns the many-to-one query.
Returns the item on the 'one' side with the highest transaction id
        as long as it is less than or equal to the transaction id of `obj`.
Example
-------
Look up the Article of a Tag with article_id = 4 and
transaction_id = 5
.. code-block:: sql
SELECT *
FROM articles_version
WHERE id = 4
AND transaction_id = (
SELECT max(transaction_id)
FROM articles_version
WHERE transaction_id <= 5
AND id = 4
)
AND operation_type != 2
"""
reflector = VersionExpressionReflector(obj, self.property)
return sa.and_(
reflector(self.property.primaryjoin),
self.many_to_one_subquery(obj),
self.remote_cls.operation_type != Operation.DELETE
)
def one_to_many_criteria(self, obj):
"""
Returns the one-to-many query.
For each item on the 'many' side, returns its latest version as long as
        the transaction of that version is less than or equal to the transaction
of `obj`.
Example
-------
Using the Article-Tags relationship, where we look for tags of
article_version with id = 3 and transaction = 5 the sql produced is
.. code-block:: sql
SELECT tags_version.*
FROM tags_version
WHERE tags_version.article_id = 3
AND tags_version.operation_type != 2
AND EXISTS (
SELECT 1
FROM tags_version as tags_version_last
WHERE tags_version_last.transaction_id <= 5
AND tags_version_last.id = tags_version.id
GROUP BY tags_version_last.id
HAVING
MAX(tags_version_last.transaction_id) =
tags_version.transaction_id
)
"""
reflector = VersionExpressionReflector(obj, self.property)
return sa.and_(
reflector(self.property.primaryjoin),
self.one_to_many_subquery(obj),
self.remote_cls.operation_type != Operation.DELETE
)
@property
def reflected_relationship(self):
"""
Builds a reflected one-to-many, one-to-one and many-to-one
relationship between two version classes.
"""
@property
def relationship(obj):
query = self.query(obj)
return self.process_query(query)
return relationship
def association_subquery(self, obj):
"""
        Returns an EXISTS clause that checks if an association exists for the
        given SQLAlchemy declarative object. This query is used by the
        many_to_many_criteria method.
Example query:
.. code-block:: sql
EXISTS (
SELECT 1
FROM article_tag_version
WHERE article_id = 3
AND tag_id = tags_version.id
AND operation_type != 2
AND EXISTS (
SELECT 1
FROM article_tag_version as article_tag_version2
WHERE article_tag_version2.tag_id = article_tag_version.tag_id
AND article_tag_version2.tx_id <=5
GROUP BY article_tag_version2.tag_id
HAVING
MAX(article_tag_version2.tx_id) =
article_tag_version.tx_id
)
)
:param obj: SQLAlchemy declarative object
"""
tx_column = option(obj, 'transaction_column_name')
reflector = VersionExpressionReflector(obj, self.property)
association_table_alias = self.association_version_table.alias()
association_cols = [
association_table_alias.c[association_col.name]
for _, association_col
in self.remote_to_association_column_pairs
]
association_exists = sa.exists(
sa.select(
[1]
).where(
sa.and_(
association_table_alias.c[tx_column] <=
getattr(obj, tx_column),
*[association_col ==
self.association_version_table.c[association_col.name]
for association_col
in association_cols]
)
).group_by(
*association_cols
).having(
sa.func.max(association_table_alias.c[tx_column]) ==
self.association_version_table.c[tx_column]
).correlate(self.association_version_table)
)
return sa.exists(
sa.select(
[1]
).where(
sa.and_(
reflector(self.property.primaryjoin),
association_exists,
self.association_version_table.c.operation_type !=
Operation.DELETE,
adapt_columns(self.property.secondaryjoin),
)
).correlate(self.local_cls, self.remote_cls)
)
def build_association_version_tables(self):
"""
Builds many-to-many association version table for given property.
Association version tables are used for tracking change history of
many-to-many associations.
"""
column = list(self.property.remote_side)[0]
self.manager.association_tables.add(column.table)
builder = TableBuilder(
self.manager,
column.table
)
metadata = column.table.metadata
if builder.parent_table.schema:
table_name = builder.parent_table.schema + '.' + builder.table_name
elif metadata.schema:
table_name = metadata.schema + '.' + builder.table_name
else:
table_name = builder.table_name
if table_name not in metadata.tables:
self.association_version_table = table = builder()
self.manager.association_version_tables.add(table)
else:
            # may have already been created if we are visiting the 'other' side of
# a self-referential many-to-many relationship
self.association_version_table = metadata.tables[table_name]
def __call__(self):
"""
Builds reflected relationship between version classes based on given
parent object's RelationshipProperty.
"""
self.local_cls = version_class(self.model)
self.versioned = False
try:
self.remote_cls = version_class(self.property.mapper.class_)
self.versioned = True
except (AttributeError, KeyError):
return
except ClassNotVersioned:
self.remote_cls = self.property.mapper.class_
if (self.property.secondary is not None and
not self.property.viewonly and
not self.manager.is_excluded_property(
self.model, self.property.key)):
self.build_association_version_tables()
# store remote cls to association table column pairs
self.remote_to_association_column_pairs = []
for column_pair in self.property.local_remote_pairs:
if column_pair[0] in self.property.target.c.values():
self.remote_to_association_column_pairs.append(column_pair)
setattr(
self.local_cls,
self.property.key,
self.reflected_relationship
)
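The subqueries above all follow the same "latest version with a transaction id <= N" pattern shown in the docstring SQL. Below is a hedged, stand-alone illustration of that pattern against a toy table, written in the same SQLAlchemy Core style (sa.select([...])) this module relies on; table and column names are made up for the example.

import sqlalchemy as sa

meta = sa.MetaData()
tags_version = sa.Table(
    'tags_version', meta,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('transaction_id', sa.Integer, primary_key=True),
)

def latest_version_exists(tx_id):
    # EXISTS clause selecting rows whose transaction_id is the newest one
    # not greater than tx_id, per primary key
    alias = tags_version.alias()
    return sa.exists(
        sa.select([1]).where(
            sa.and_(alias.c.transaction_id <= tx_id,
                    alias.c.id == tags_version.c.id)
        ).group_by(alias.c.id).having(
            sa.func.max(alias.c.transaction_id) == tags_version.c.transaction_id
        )
    )

print(latest_version_exists(5))  # renders roughly the EXISTS (...) shown above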
|
bsd-3-clause
|
pyramania/scipy
|
scipy/integrate/tests/test_quadrature.py
|
40
|
8072
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import cos, sin, pi
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_almost_equal, assert_allclose, assert_
from scipy.integrate import (quadrature, romberg, romb, newton_cotes,
cumtrapz, quad, simps)
from scipy.integrate.quadrature import AccuracyWarning
class TestQuadrature(TestCase):
def quad(self, x, a, b, args):
raise NotImplementedError
def test_quadrature(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, (2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_quadrature_rtol(self):
def myfunc(x, n, z): # Bessel function integrand
return 1e90 * cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)
table_val = 1e90 * 0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_quadrature_miniter(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
table_val = 0.30614353532540296487
for miniter in [5, 52]:
val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)
assert_almost_equal(val, table_val, decimal=7)
assert_(err < 1.0)
def test_quadrature_single_args(self):
def myfunc(x, n):
return 1e90 * cos(n*x-1.8*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)
table_val = 1e90 * 0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_romberg(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val = romberg(myfunc, 0, pi, args=(2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_romberg_rtol(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return 1e19*cos(n*x-z*sin(x))/pi
val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)
table_val = 1e19*0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_romb(self):
assert_equal(romb(np.arange(17)), 128)
def test_romb_gh_3731(self):
# Check that romb makes maximal use of data points
x = np.arange(2**4+1)
y = np.cos(0.2*x)
val = romb(y)
val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
assert_allclose(val, val2, rtol=1e-8, atol=0)
# should be equal to romb with 2**k+1 samples
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=AccuracyWarning)
val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(),
divmax=4)
assert_allclose(val, val3, rtol=1e-12, atol=0)
def test_non_dtype(self):
# Check that we work fine with functions returning float
import math
valmath = romberg(math.sin, 0, 1)
expected_val = 0.45969769413185085
assert_almost_equal(valmath, expected_val, decimal=7)
def test_newton_cotes(self):
"""Test the first few degrees, for evenly spaced points."""
n = 1
wts, errcoff = newton_cotes(n, 1)
assert_equal(wts, n*np.array([0.5, 0.5]))
assert_almost_equal(errcoff, -n**3/12.0)
n = 2
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)
assert_almost_equal(errcoff, -n**5/2880.0)
n = 3
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)
assert_almost_equal(errcoff, -n**5/6480.0)
n = 4
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
assert_almost_equal(errcoff, -n**7/1935360.0)
def test_newton_cotes2(self):
"""Test newton_cotes with points that are not evenly spaced."""
x = np.array([0.0, 1.5, 2.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 8.0/3
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
x = np.array([0.0, 1.4, 2.1, 3.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 9.0
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
def test_simps(self):
y = np.arange(17)
assert_equal(simps(y), 128)
assert_equal(simps(y, dx=0.5), 64)
assert_equal(simps(y, x=np.linspace(0, 4, 17)), 32)
y = np.arange(4)
x = 2**y
assert_equal(simps(y, x=x, even='avg'), 13.875)
assert_equal(simps(y, x=x, even='first'), 13.75)
assert_equal(simps(y, x=x, even='last'), 14)
class TestCumtrapz(TestCase):
def test_1d(self):
x = np.linspace(-2, 2, num=5)
y = x
y_int = cumtrapz(y, x, initial=0)
y_expected = [0., -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, x, initial=None)
assert_allclose(y_int, y_expected[1:])
def test_y_nd_x_nd(self):
x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
y = x
y_int = cumtrapz(y, x, initial=0)
y_expected = np.array([[[0., 0.5, 2., 4.5],
[0., 4.5, 10., 16.5]],
[[0., 8.5, 18., 28.5],
[0., 12.5, 26., 40.5]],
[[0., 16.5, 34., 52.5],
[0., 20.5, 42., 64.5]]])
assert_allclose(y_int, y_expected)
# Try with all axes
shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
for axis, shape in zip([0, 1, 2], shapes):
y_int = cumtrapz(y, x, initial=3.45, axis=axis)
assert_equal(y_int.shape, (3, 2, 4))
y_int = cumtrapz(y, x, initial=None, axis=axis)
assert_equal(y_int.shape, shape)
def test_y_nd_x_1d(self):
y = np.arange(3 * 2 * 4).reshape(3, 2, 4)
x = np.arange(4)**2
# Try with all axes
ys_expected = (
np.array([[[4., 5., 6., 7.],
[8., 9., 10., 11.]],
[[40., 44., 48., 52.],
[56., 60., 64., 68.]]]),
np.array([[[2., 3., 4., 5.]],
[[10., 11., 12., 13.]],
[[18., 19., 20., 21.]]]),
np.array([[[0.5, 5., 17.5],
[4.5, 21., 53.5]],
[[8.5, 37., 89.5],
[12.5, 53., 125.5]],
[[16.5, 69., 161.5],
[20.5, 85., 197.5]]]))
for axis, y_expected in zip([0, 1, 2], ys_expected):
y_int = cumtrapz(y, x=x[:y.shape[axis]], axis=axis, initial=None)
assert_allclose(y_int, y_expected)
def test_x_none(self):
y = np.linspace(-2, 2, num=5)
y_int = cumtrapz(y)
y_expected = [-1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, initial=1.23)
y_expected = [1.23, -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3)
y_expected = [-4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3, initial=1.23)
y_expected = [1.23, -4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
amwelch/a10sdk-python
|
a10sdk/core/A10_file/file_debug_monitor.py
|
2
|
1972
|
from a10sdk.common.A10BaseClass import A10BaseClass
class DebugMonitor(A10BaseClass):
""" :param action: {"optional": true, "enum": ["create", "import", "export", "copy", "rename", "check", "replace", "delete"], "type": "string", "description": "'create': create; 'import': import; 'export': export; 'copy': copy; 'rename': rename; 'check': check; 'replace': replace; 'delete': delete; ", "format": "enum"}
:param dst_file: {"description": "destination file name for copy and rename action", "format": "string", "minLength": 1, "optional": true, "maxLength": 32, "type": "string"}
:param file_handle: {"description": "full path of the uploaded file", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 255, "type": "string"}
:param file: {"description": "debug monitor local file name", "format": "string", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}
:param size: {"description": "debug monitor file size in byte", "format": "number", "type": "number", "maximum": 2147483647, "minimum": 0, "optional": true}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
debug monitor file information and management commands.
Class debug-monitor supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/file/debug-monitor`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "debug-monitor"
self.a10_url="/axapi/v3/file/debug-monitor"
self.DeviceProxy = ""
self.action = ""
self.dst_file = ""
self.file_handle = ""
self.A10WW_file = ""
self.size = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
|
apache-2.0
|
nemith/Marlin-BigBoxPro
|
Marlin/scripts/g29_auto.py
|
184
|
4842
|
#!/usr/bin/python3
# This file is for preprocessing gcode for the new G29 Autobedleveling from Marlin
# It will analyse the first 2 layers and return the maximum size for this part
# After this it will replace the g29_keyword = ';MarlinG29Script' with the new G29 LRFB command
# the new file will be created in the same folder.
# your gcode-file/folder
folder = './'
my_file = 'test.gcode'
# this is the minimum number of G1 instructions that should appear between 2 different heights
min_g1 = 3
# maximum number of lines to parse; we don't want to parse the complete file,
# only the first plane is what we are interested in
max_g1 = 100000000
# g29 keyword
g29_keyword = 'g29'
g29_keyword = g29_keyword.upper()
# output filename
output_file = folder + 'g29_' + my_file
# input filename
input_file = folder + my_file
# minimum scan size
min_size = 40
probing_points = 3 # points x points
# other stuff
min_x = 500
min_y = min_x
max_x = -500
max_y = max_x
last_z = 0.001
layer = 0
lines_of_g1 = 0
gcode = []
# return only g1-lines
def has_g1(line):
return line[:2].upper() == "G1"
# find position in g1 (x,y,z)
def find_axis(line, axis):
found = False
number = ""
for char in line:
if found:
if char == ".":
number += char
elif char == "-":
number += char
else:
try:
int(char)
number += char
except ValueError:
break
else:
found = char.upper() == axis.upper()
try:
return float(number)
except ValueError:
return None
# save the min or max-values for each axis
def set_mima(line):
global min_x, max_x, min_y, max_y, last_z
current_x = find_axis(line, 'x')
current_y = find_axis(line, 'y')
if current_x is not None:
min_x = min(current_x, min_x)
max_x = max(current_x, max_x)
if current_y is not None:
min_y = min(current_y, min_y)
max_y = max(current_y, max_y)
return min_x, max_x, min_y, max_y
# find z in the code and return it
def find_z(gcode, start_at_line=0):
for i in range(start_at_line, len(gcode)):
my_z = find_axis(gcode[i], 'Z')
if my_z is not None:
return my_z, i
def z_parse(gcode, start_at_line=0, end_at_line=0):
i = start_at_line
all_z = []
line_between_z = []
z_at_line = []
# last_z = 0
last_i = -1
while len(gcode) > i:
try:
z, i = find_z(gcode, i + 1)
except TypeError:
break
all_z.append(z)
z_at_line.append(i)
temp_line = i - last_i -1
line_between_z.append(i - last_i - 1)
# last_z = z
last_i = i
if 0 < end_at_line <= i or temp_line >= min_g1:
            # print('break at line {} at height {}'.format(i, z))
break
line_between_z = line_between_z[1:]
return all_z, line_between_z, z_at_line
# get the lines which should be the first layer
def get_lines(gcode, minimum):
i = 0
all_z, line_between_z, z_at_line = z_parse(gcode, end_at_line=max_g1)
for count in line_between_z:
i += 1
if count > minimum:
# print('layer: {}:{}'.format(z_at_line[i-1], z_at_line[i]))
return z_at_line[i - 1], z_at_line[i]
with open(input_file, 'r') as file:
lines = 0
for line in file:
lines += 1
if lines > 1000:
break
if has_g1(line):
gcode.append(line)
file.close()
start, end = get_lines(gcode, min_g1)
for i in range(start, end):
set_mima(gcode[i])
print('x_min:{} x_max:{}\ny_min:{} y_max:{}'.format(min_x, max_x, min_y, max_y))
# resize min/max - values for minimum scan
if max_x - min_x < min_size:
offset_x = int((min_size - (max_x - min_x)) / 2 + 0.5) # int round up
# print('min_x! with {}'.format(int(max_x - min_x)))
min_x = int(min_x) - offset_x
max_x = int(max_x) + offset_x
if max_y - min_y < min_size:
offset_y = int((min_size - (max_y - min_y)) / 2 + 0.5) # int round up
# print('min_y! with {}'.format(int(max_y - min_y)))
min_y = int(min_y) - offset_y
max_y = int(max_y) + offset_y
new_command = 'G29 L{0} R{1} F{2} B{3} P{4}\n'.format(min_x,
max_x,
min_y,
max_y,
probing_points)
out_file = open(output_file, 'w')
in_file = open(input_file, 'r')
for line in in_file:
if line[:len(g29_keyword)].upper() == g29_keyword:
out_file.write(new_command)
print('write G29')
else:
out_file.write(line)
in_file.close()
out_file.close()
print('auto G29 finished')
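A quick self-contained check of the axis-parsing idea in find_axis above, re-implemented with a regular expression purely for illustration; the character-by-character parser is what the script actually uses.

import re

def find_axis_re(line, axis):
    # pull the signed number that follows the axis letter, e.g. X12.5 -> 12.5
    match = re.search(r'%s(-?\d+(?:\.\d+)?)' % re.escape(axis.upper()), line.upper())
    return float(match.group(1)) if match else None

assert find_axis_re('G1 X12.5 Y-3 Z0.3 F1500', 'x') == 12.5
assert find_axis_re('G1 X12.5 Y-3 Z0.3 F1500', 'z') == 0.3
assert find_axis_re('G1 F1500', 'x') is None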
|
gpl-3.0
|
partofthething/home-assistant
|
homeassistant/components/bmw_connected_drive/lock.py
|
4
|
3564
|
"""Support for BMW car locks with BMW ConnectedDrive."""
import logging
from bimmer_connected.state import LockState
from homeassistant.components.lock import LockEntity
from homeassistant.const import STATE_LOCKED, STATE_UNLOCKED
from . import DOMAIN as BMW_DOMAIN, BMWConnectedDriveBaseEntity
from .const import CONF_ACCOUNT, DATA_ENTRIES
DOOR_LOCK_STATE = "door_lock_state"
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the BMW ConnectedDrive binary sensors from config entry."""
account = hass.data[BMW_DOMAIN][DATA_ENTRIES][config_entry.entry_id][CONF_ACCOUNT]
entities = []
if not account.read_only:
for vehicle in account.account.vehicles:
device = BMWLock(account, vehicle, "lock", "BMW lock")
entities.append(device)
async_add_entities(entities, True)
class BMWLock(BMWConnectedDriveBaseEntity, LockEntity):
"""Representation of a BMW vehicle lock."""
def __init__(self, account, vehicle, attribute: str, sensor_name):
"""Initialize the lock."""
super().__init__(account, vehicle)
self._attribute = attribute
self._name = f"{self._vehicle.name} {self._attribute}"
self._unique_id = f"{self._vehicle.vin}-{self._attribute}"
self._sensor_name = sensor_name
self._state = None
self.door_lock_state_available = (
DOOR_LOCK_STATE in self._vehicle.available_attributes
)
@property
def unique_id(self):
"""Return the unique ID of the lock."""
return self._unique_id
@property
def name(self):
"""Return the name of the lock."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the lock."""
vehicle_state = self._vehicle.state
result = self._attrs.copy()
if self.door_lock_state_available:
result["door_lock_state"] = vehicle_state.door_lock_state.value
result["last_update_reason"] = vehicle_state.last_update_reason
return result
@property
def is_locked(self):
"""Return true if lock is locked."""
if self.door_lock_state_available:
result = self._state == STATE_LOCKED
else:
result = None
return result
def lock(self, **kwargs):
"""Lock the car."""
_LOGGER.debug("%s: locking doors", self._vehicle.name)
# Optimistic state set here because it takes some time before the
# update callback response
self._state = STATE_LOCKED
self.schedule_update_ha_state()
self._vehicle.remote_services.trigger_remote_door_lock()
def unlock(self, **kwargs):
"""Unlock the car."""
_LOGGER.debug("%s: unlocking doors", self._vehicle.name)
# Optimistic state set here because it takes some time before the
# update callback response
self._state = STATE_UNLOCKED
self.schedule_update_ha_state()
self._vehicle.remote_services.trigger_remote_door_unlock()
def update(self):
"""Update state of the lock."""
_LOGGER.debug("%s: updating data for %s", self._vehicle.name, self._attribute)
vehicle_state = self._vehicle.state
# Possible values: LOCKED, SECURED, SELECTIVE_LOCKED, UNLOCKED
self._state = (
STATE_LOCKED
if vehicle_state.door_lock_state in [LockState.LOCKED, LockState.SECURED]
else STATE_UNLOCKED
)
|
mit
|
tjsavage/tmrwmedia
|
django/db/utils.py
|
16
|
5970
|
import inspect
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
DEFAULT_DB_ALIAS = 'default'
# Define some exceptions that mirror the PEP249 interface.
# We will rethrow any backend-specific errors using these
# common wrappers
class DatabaseError(Exception):
pass
class IntegrityError(DatabaseError):
pass
def load_backend(backend_name):
try:
module = import_module('.base', 'django.db.backends.%s' % backend_name)
import warnings
warnings.warn(
"Short names for DATABASE_ENGINE are deprecated; prepend with 'django.db.backends.'",
DeprecationWarning
)
return module
except ImportError, e:
# Look for a fully qualified database backend name
try:
return import_module('.base', backend_name)
except ImportError, e_user:
# The database backend wasn't found. Display a helpful error message
# listing all possible (built-in) database backends.
backend_dir = os.path.join(os.path.dirname(__file__), 'backends')
try:
available_backends = [f for f in os.listdir(backend_dir)
if os.path.isdir(os.path.join(backend_dir, f))
and not f.startswith('.')]
except EnvironmentError:
available_backends = []
available_backends.sort()
if backend_name not in available_backends:
error_msg = ("%r isn't an available database backend. \n" +
"Try using django.db.backends.XXX, where XXX is one of:\n %s\n" +
"Error was: %s") % \
(backend_name, ", ".join(map(repr, available_backends)), e_user)
raise ImproperlyConfigured(error_msg)
else:
raise # If there's some other error, this must be an error in Django itself.
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler(object):
def __init__(self, databases):
self.databases = databases
self._connections = {}
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection
where no settings is provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('OPTIONS', {})
conn.setdefault('TEST_CHARSET', None)
conn.setdefault('TEST_COLLATION', None)
conn.setdefault('TEST_NAME', None)
conn.setdefault('TEST_MIRROR', None)
conn.setdefault('TIME_ZONE', settings.TIME_ZONE)
for setting in ('NAME', 'USER', 'PASSWORD', 'HOST', 'PORT'):
conn.setdefault(setting, '')
def __getitem__(self, alias):
if alias in self._connections:
return self._connections[alias]
self.ensure_defaults(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
self._connections[alias] = conn
return conn
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
class ConnectionRouter(object):
def __init__(self, routers):
self.routers = []
for r in routers:
if isinstance(r, basestring):
try:
module_name, klass_name = r.rsplit('.', 1)
module = import_module(module_name)
except ImportError, e:
raise ImproperlyConfigured('Error importing database router %s: "%s"' % (klass_name, e))
try:
router_class = getattr(module, klass_name)
except AttributeError:
                    raise ImproperlyConfigured('Module "%s" does not define a database router named "%s"' % (module, klass_name))
else:
router = router_class()
else:
router = r
self.routers.append(router)
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
chosen_db = getattr(router, action)(model, **hints)
if chosen_db:
return chosen_db
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
try:
return hints['instance']._state.db or DEFAULT_DB_ALIAS
except KeyError:
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
allow = router.allow_relation(obj1, obj2, **hints)
if allow is not None:
return allow
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
return obj1._state.db == obj2._state.db
def allow_syncdb(self, db, model):
for router in self.routers:
try:
allow = router.allow_syncdb(db, model)
if allow is not None:
return allow
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
return True
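An illustrative sketch, not part of django.db.utils, of the kind of router object ConnectionRouter expects; any method a router does not define is simply skipped, as the AttributeError handling above shows.

class ReadReplicaRouter(object):
    """Hypothetical example router: reads go to 'replica', writes to 'default'."""
    def db_for_read(self, model, **hints):
        return 'replica'
    def db_for_write(self, model, **hints):
        return 'default'
    # allow_relation and allow_syncdb are intentionally omitted;
    # ConnectionRouter falls back to its defaults when a router lacks a method.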
|
bsd-3-clause
|
rgacogne/pdns
|
build-scripts/docker/repo-test/generate-repo-files.py
|
6
|
8620
|
#!/usr/bin/env python3
#
# Given Python's versioning history I'm going with `python3`.
#
# Usage:
# - `python3 -m venv venv`
# - `bash`
# - `source venv/bin/activate`
# - `pip install --upgrade pip`
# - `pip install -r requirements.txt`
# - `./generate-repo-files.py auth-42`
# Modules
import argparse
import subprocess
import sys
from pathlib import Path
# since we use this at OX (or Ansible uses it, whatever)
from jinja2 import Environment, FileSystemLoader
# Globals
g_version = '0.0.1'
g_verbose = False
g_env = Environment(
loader=FileSystemLoader('templates/')
)
g_dockerfile = 'Dockerfile.'
g_run_output = False
# Init Functions
def init_argparser():
parser = argparse.ArgumentParser(description='Generate Docker files to ' +
'test PowerDNS repositories.')
parser.add_argument('release', metavar='RELEASE',
choices=[# Authoritative Server
'auth-42', 'auth-43', 'auth-44', 'auth-master',
# Recursor
'rec-42', 'rec-43', 'rec-44', 'rec-45',
'rec-master',
# DNSDist
'dnsdist-15', 'dnsdist-16', 'dnsdist-master'
],
help='the release to generate Docker files for: ' +
'%(choices)s')
parser.add_argument('--run-output', action='store_true',
help='always show output from running a container')
parser.add_argument('--test', action='store_true',
help='test the release')
parser.add_argument('--verbose', action='store_true',
help='verbose output')
parser.add_argument('--version', action='store_true',
help='print version')
return parser
# Release File Functions
def write_dockerfile (os, os_version, release):
tpl = g_env.get_template('Dockerfile-{}.jinja2'.format(os))
if os == 'raspbian':
os_image = 'resin/rpi-raspbian'
else:
os_image = os
if release.startswith('auth-'):
if os == 'centos':
pkg = 'pdns'
else:
pkg = 'pdns-server'
cmd = 'pdns_server'
elif release.startswith('rec-'):
pkg = 'pdns-recursor'
cmd = 'pdns_recursor'
elif release.startswith('dnsdist-'):
pkg = 'dnsdist'
cmd = 'dnsdist'
f = open('{}{}.{}-{}'.format(g_dockerfile, release, os, os_version), 'w')
# This comment was in the template for the `--nobest` part but that makes
# the template look even more different than the final output, so:
#
# > When should the logic be in the code and when in the template? :shrug:
# > I prefer it to be in the code but I also do not want to add extra vars
# > and logic to the code unless necessary.
f.write(tpl.render({ "os": os,
"os_image": os_image,
"os_version": os_version,
"release": release,
"cmd": cmd,
"pkg": pkg }))
f.close()
def write_list_file (os, os_version, release):
tpl = g_env.get_template('pdns-list.jinja2')
if os in ['debian', 'ubuntu']:
arch = ' [arch=amd64] '
else:
arch = ' '
f = open('pdns.list.{}.{}-{}'.format(release, os, os_version), 'w')
f.write(tpl.render({ "os": os,
"os_version": os_version,
"release": release,
"arch": arch }))
f.close()
def write_pkg_pin_file (release):
tpl = g_env.get_template('pkg-pin.jinja2')
if release.startswith('auth-') or release.startswith('rec-'):
pkg = 'pdns-'
elif release.startswith('dnsdist-'):
pkg = 'dnsdist'
f = open('pkg-pin', 'w')
f.write(tpl.render({ "pkg": pkg }))
f.close()
def write_release_files (release):
if g_verbose:
print("Writing release files...")
if release in ['auth-43', 'auth-master']:
write_dockerfile('centos', '6', release)
if release in ['auth-41', 'auth-42', 'auth-43', 'auth-44', 'auth-master',
'rec-42', 'rec-43', 'rec-44', 'rec-45', 'rec-master',
'dnsdist-15', 'dnsdist-16', 'dnsdist-master']:
write_dockerfile('centos', '7', release)
write_dockerfile('ubuntu', 'bionic', release)
write_list_file('ubuntu', 'bionic', release)
write_pkg_pin_file(release)
if release in ['auth-42', 'auth-43', 'auth-44', 'auth-master',
'rec-42', 'rec-43', 'rec-44', 'rec-45', 'rec-master',
'dnsdist-15', 'dnsdist-16', 'dnsdist-master']:
write_dockerfile('centos', '8', release)
write_dockerfile('debian', 'buster', release)
write_dockerfile('raspbian', 'buster', release)
write_list_file('debian', 'buster', release)
write_list_file('raspbian', 'buster', release)
if release in ['auth-43', 'auth-44', 'auth-master',
'rec-43', 'rec-44', 'rec-45', 'rec-master',
'dnsdist-15', 'dnsdist-16', 'dnsdist-master']:
write_dockerfile('ubuntu', 'focal', release)
write_list_file('ubuntu', 'focal', release)
# Test Release Functions
def build (dockerfile):
# Maybe create `determine_tag` function.
if len(str(dockerfile)) <= len(g_dockerfile):
print('Unable to determine tag for {}'.format(dockerfile))
return (None, None)
tag = str(dockerfile)[len(g_dockerfile):]
print('Building Docker image using {}...'.format(dockerfile))
if g_verbose:
print(' - tag = {}'.format(tag))
cp = subprocess.run(['docker', 'build', '--no-cache', '--pull', '--file',
dockerfile, '--tag', tag, '.'],
capture_output=not(g_verbose))
# FIXME write failed output to log
if cp.returncode != 0:
print('Error building {}: {}'.format(tag, repr(cp.returncode)))
return ( tag, cp.returncode )
return ( tag, cp.returncode )
def run (tag):
if g_run_output:
capture_run_output = False
else:
capture_run_output = not(g_verbose)
print('Running Docker container tagged {}...'.format(tag))
cp = subprocess.run(['docker', 'run', tag],
capture_output=capture_run_output)
# for some reason 99 is returned on `cmd --version` :shrug:
if cp.returncode != 0 and cp.returncode != 99:
# FIXME write failed output to log
print('Error running {}: {}'.format(tag, repr(cp.returncode)))
return cp.returncode
return cp.returncode
def collect_dockerfiles (release):
if g_verbose:
print('Collecting release files for {}...'.format(release))
p = Path('.')
files = list(p.glob('{}{}.*'.format(g_dockerfile, release)))
if g_verbose:
for file in files:
print(' - {}'.format(file))
return files
def test_release (release):
# sorted because we want determinism
dockerfiles = sorted(collect_dockerfiles(release))
failed_builds = []
failed_runs = []
print('=== testing {} ==='.format(release))
for df in dockerfiles:
if g_verbose:
print('--- {} ---'.format(df))
(tag, returncode) = build(df)
if returncode != 0:
print('Skipping running {} due to build error: {}'
.format(df, returncode))
failed_builds.append((str(df), returncode))
elif tag is None:
print('Skipping running {} due to undetermined tag.'.format(df))
failed_builds.append((str(df), returncode))
else:
returncode = run(tag)
# for some reason 99 is returned on `cmd --version` :shrug:
if returncode != 0 and returncode != 99:
failed_runs.append((tag, returncode))
print('Test done.')
if len(failed_builds) > 0:
print('- failed builds:')
for fb in failed_builds:
print(' - {}'.format(fb))
if len(failed_runs) > 0:
print('- failed runs:')
for fr in failed_runs:
print(' - {}'.format(fr))
# Main Program
parser = init_argparser()
args = parser.parse_args()
if args.version:
print('generate-repo-files v' + g_version)
sys.exit(0)
if args.verbose:
g_verbose = True
if args.run_output:
g_run_output = True
write_release_files(args.release)
if args.test:
test_release(args.release)
|
gpl-2.0
|
jmacmahon/invenio
|
modules/bibformat/lib/elements/bfe_url.py
|
16
|
1467
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints full-text URLs
"""
__revision__ = "$Id$"
def format_element(bfo, style, separator='; '):
"""
This is the default format for formatting full-text URLs.
@param separator: the separator between urls.
@param style: CSS class of the link
"""
urls_u = bfo.fields("8564_u")
if style != "":
style = 'class="'+style+'"'
urls = ['<a '+ style + \
'href="' + url + '">' + url +'</a>'
for url in urls_u]
return separator.join(urls)
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
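A hedged, stand-alone restatement of the link building done by format_element above, handy for seeing the markup it produces; the helper name is made up for the example.

def render_links(urls_u, style, separator='; '):
    # build one anchor per URL, mirroring format_element above
    css = 'class="%s" ' % style if style else ''
    return separator.join('<a %shref="%s">%s</a>' % (css, url, url)
                          for url in urls_u)

print(render_links(['http://example.org/a.pdf'], 'fulltext'))
# <a class="fulltext" href="http://example.org/a.pdf">http://example.org/a.pdf</a>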
|
gpl-2.0
|
QGuLL/samba
|
python/samba/tests/__init__.py
|
18
|
12737
|
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2007-2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba Python tests."""
import os
import ldb
import samba
import samba.auth
from samba import param
from samba.samdb import SamDB
from samba import credentials
import subprocess
import sys
import tempfile
import unittest
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception):
"""Test skipped."""
HEXDUMP_FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)])
class TestCase(unittest.TestCase):
"""A Samba test case."""
def setUp(self):
super(TestCase, self).setUp()
test_debug_level = os.getenv("TEST_DEBUG_LEVEL")
if test_debug_level is not None:
test_debug_level = int(test_debug_level)
self._old_debug_level = samba.get_debug_level()
samba.set_debug_level(test_debug_level)
self.addCleanup(samba.set_debug_level, test_debug_level)
def get_loadparm(self):
return env_loadparm()
def get_credentials(self):
return cmdline_credentials
def hexdump(self, src):
N = 0
result = ''
while src:
ll = src[:8]
lr = src[8:16]
src = src[16:]
hl = ' '.join(["%02X" % ord(x) for x in ll])
hr = ' '.join(["%02X" % ord(x) for x in lr])
ll = ll.translate(HEXDUMP_FILTER)
lr = lr.translate(HEXDUMP_FILTER)
result += "[%04X] %-*s %-*s %s %s\n" % (N, 8*3, hl, 8*3, hr, ll, lr)
N += 16
return result
# These functions didn't exist before Python2.7:
if sys.version_info < (2, 7):
import warnings
def skipTest(self, reason):
raise SkipTest(reason)
def assertIn(self, member, container, msg=None):
self.assertTrue(member in container, msg)
def assertIs(self, a, b, msg=None):
self.assertTrue(a is b, msg)
def assertIsNot(self, a, b, msg=None):
self.assertTrue(a is not b, msg)
def assertIsNotNone(self, a, msg=None):
self.assertTrue(a is not None)
def assertIsInstance(self, a, b, msg=None):
self.assertTrue(isinstance(a, b), msg)
def assertIsNone(self, a, msg=None):
self.assertTrue(a is None, msg)
def assertGreater(self, a, b, msg=None):
self.assertTrue(a > b, msg)
def assertGreaterEqual(self, a, b, msg=None):
self.assertTrue(a >= b, msg)
def assertLess(self, a, b, msg=None):
self.assertTrue(a < b, msg)
def assertLessEqual(self, a, b, msg=None):
self.assertTrue(a <= b, msg)
def addCleanup(self, fn, *args, **kwargs):
self._cleanups = getattr(self, "_cleanups", []) + [
(fn, args, kwargs)]
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
def run(self, result=None):
if result is None: result = self.defaultTestResult()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
try:
try:
self.setUp()
except SkipTest, e:
self._addSkip(result, str(e))
return
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
return
ok = False
try:
testMethod()
ok = True
except SkipTest, e:
self._addSkip(result, str(e))
return
except self.failureException:
result.addFailure(self, self._exc_info())
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
try:
self.tearDown()
except SkipTest, e:
self._addSkip(result, str(e))
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
ok = False
for (fn, args, kwargs) in reversed(getattr(self, "_cleanups", [])):
fn(*args, **kwargs)
if ok: result.addSuccess(self)
finally:
result.stopTest(self)
class LdbTestCase(TestCase):
"""Trivial test case for running tests against a LDB."""
def setUp(self):
super(LdbTestCase, self).setUp()
self.filename = os.tempnam()
self.ldb = samba.Ldb(self.filename)
def set_modules(self, modules=[]):
"""Change the modules for this Ldb."""
m = ldb.Message()
m.dn = ldb.Dn(self.ldb, "@MODULES")
m["@LIST"] = ",".join(modules)
self.ldb.add(m)
self.ldb = samba.Ldb(self.filename)
class TestCaseInTempDir(TestCase):
def setUp(self):
super(TestCaseInTempDir, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.addCleanup(self._remove_tempdir)
def _remove_tempdir(self):
self.assertEquals([], os.listdir(self.tempdir))
os.rmdir(self.tempdir)
self.tempdir = None
def env_loadparm():
lp = param.LoadParm()
try:
lp.load(os.environ["SMB_CONF_PATH"])
except KeyError:
raise KeyError("SMB_CONF_PATH not set")
return lp
def env_get_var_value(var_name):
"""Returns value for variable in os.environ
    Function throws AssertionError if the variable is not defined.
    Unit-test based python tests require certain input params
    to be set in the environment, otherwise they can't be run.
"""
assert var_name in os.environ.keys(), "Please supply %s in environment" % var_name
return os.environ[var_name]
cmdline_credentials = None
class RpcInterfaceTestCase(TestCase):
"""DCE/RPC Test case."""
class ValidNetbiosNameTests(TestCase):
def test_valid(self):
self.assertTrue(samba.valid_netbios_name("FOO"))
def test_too_long(self):
self.assertFalse(samba.valid_netbios_name("FOO"*10))
def test_invalid_characters(self):
self.assertFalse(samba.valid_netbios_name("*BLA"))
class BlackboxProcessError(Exception):
"""This is raised when check_output() process returns a non-zero exit status
Exception instance should contain the exact exit code (S.returncode),
command line (S.cmd), process output (S.stdout) and process error stream
(S.stderr)
"""
def __init__(self, returncode, cmd, stdout, stderr):
self.returncode = returncode
self.cmd = cmd
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return "Command '%s'; exit status %d; stdout: '%s'; stderr: '%s'" % (self.cmd, self.returncode,
self.stdout, self.stderr)
class BlackboxTestCase(TestCase):
"""Base test case for blackbox tests."""
def _make_cmdline(self, line):
bindir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../bin"))
parts = line.split(" ")
if os.path.exists(os.path.join(bindir, parts[0])):
parts[0] = os.path.join(bindir, parts[0])
line = " ".join(parts)
return line
def check_run(self, line):
line = self._make_cmdline(line)
p = subprocess.Popen(line, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
retcode = p.wait()
if retcode:
raise BlackboxProcessError(retcode, line, p.stdout.read(), p.stderr.read())
def check_output(self, line):
line = self._make_cmdline(line)
p = subprocess.Popen(line, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, close_fds=True)
retcode = p.wait()
if retcode:
raise BlackboxProcessError(retcode, line, p.stdout.read(), p.stderr.read())
return p.stdout.read()
def connect_samdb(samdb_url, lp=None, session_info=None, credentials=None,
flags=0, ldb_options=None, ldap_only=False, global_schema=True):
"""Create SamDB instance and connects to samdb_url database.
:param samdb_url: Url for database to connect to.
:param lp: Optional loadparm object
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param ldap_only: If set, only remote LDAP connection will be created.
:param global_schema: Whether to use global schema.
    The added value for tests is that this is a shorthand function
    for building a proper URL for ldb.connect() while using default
    connection parameters based on the test environment.
"""
if not "://" in samdb_url:
if not ldap_only and os.path.isfile(samdb_url):
samdb_url = "tdb://%s" % samdb_url
else:
samdb_url = "ldap://%s" % samdb_url
# use 'paged_search' module when connecting remotely
if samdb_url.startswith("ldap://"):
ldb_options = ["modules:paged_searches"]
elif ldap_only:
raise AssertionError("Trying to connect to %s while remote "
"connection is required" % samdb_url)
# set defaults for test environment
if lp is None:
lp = env_loadparm()
if session_info is None:
session_info = samba.auth.system_session(lp)
if credentials is None:
credentials = cmdline_credentials
return SamDB(url=samdb_url,
lp=lp,
session_info=session_info,
credentials=credentials,
flags=flags,
options=ldb_options,
global_schema=global_schema)
def connect_samdb_ex(samdb_url, lp=None, session_info=None, credentials=None,
flags=0, ldb_options=None, ldap_only=False):
"""Connects to samdb_url database
:param samdb_url: Url for database to connect to.
:param lp: Optional loadparm object
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param ldap_only: If set, only remote LDAP connection will be created.
:return: (sam_db_connection, rootDse_record) tuple
"""
sam_db = connect_samdb(samdb_url, lp, session_info, credentials,
flags, ldb_options, ldap_only)
# fetch RootDse
res = sam_db.search(base="", expression="", scope=ldb.SCOPE_BASE,
attrs=["*"])
return (sam_db, res[0])
def connect_samdb_env(env_url, env_username, env_password, lp=None):
"""Connect to SamDB by getting URL and Credentials from environment
    :param env_url: Environment variable name to get the ldb URL from
:param env_username: Username environment variable
:param env_password: Password environment variable
:return: sam_db_connection
"""
samdb_url = env_get_var_value(env_url)
creds = credentials.Credentials()
if lp is None:
# guess Credentials parameters here. Otherwise workstation
        # and domain fields are NULL and gencache code segfaults
lp = param.LoadParm()
creds.guess(lp)
creds.set_username(env_get_var_value(env_username))
creds.set_password(env_get_var_value(env_password))
return connect_samdb(samdb_url, credentials=creds, lp=lp)
def delete_force(samdb, dn):
try:
samdb.delete(dn)
except ldb.LdbError, (num, errstr):
assert num == ldb.ERR_NO_SUCH_OBJECT, "ldb.delete() failed: %s" % errstr
|
gpl-3.0
|
cprov/snapcraft
|
tests/unit/plugins/test_catkin_tools.py
|
2
|
8676
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import re
from unittest import mock
from testtools.matchers import Contains, Equals
import snapcraft
from snapcraft.plugins import catkin_tools
from tests import unit
class CatkinToolsPluginBaseTestCase(unit.TestCase):
def setUp(self):
super().setUp()
class props:
rosdistro = "indigo"
ubuntu_distro = "trusty"
catkin_packages = ["my_package"]
source_space = "src"
source_subdir = None
include_roscore = False
catkin_cmake_args = []
underlay = None
rosinstall_files = None
build_attributes = []
self.properties = props()
self.project_options = snapcraft.ProjectOptions()
patcher = mock.patch("snapcraft.plugins._python.Pip")
self.pip_mock = patcher.start()
self.addCleanup(patcher.stop)
self.pip_mock.return_value.list.return_value = {}
class CatkinToolsPluginTestCase(CatkinToolsPluginBaseTestCase):
def setUp(self):
super().setUp()
self.project = snapcraft.ProjectOptions()
self.compilers = catkin_tools.Compilers(
"compilers_path", "sources", self.project
)
patcher = mock.patch("snapcraft.repo.Ubuntu")
self.ubuntu_mock = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch("subprocess.check_output")
self.check_output_mock = patcher.start()
self.addCleanup(patcher.stop)
@mock.patch("snapcraft.plugins.catkin_tools.Compilers")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "run")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "_run_in_bash")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "run_output", return_value="foo")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "_prepare_build")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "_finish_build")
def test_build_multiple(
self,
finish_build_mock,
prepare_build_mock,
run_output_mock,
bashrun_mock,
run_mock,
compilers_mock,
):
self.properties.catkin_packages.append("package_2")
plugin = catkin_tools.CatkinToolsPlugin(
"test-part", self.properties, self.project_options
)
os.makedirs(os.path.join(plugin.sourcedir, "src"))
plugin.build()
class check_pkg_arguments:
def __init__(self, test):
self.test = test
def __eq__(self, args):
index = args.index("build")
packages = args[index + 1 :]
self.test.assertIn("my_package", packages)
self.test.assertIn("package_2", packages)
return True
bashrun_mock.assert_called_with(check_pkg_arguments(self), env=mock.ANY)
finish_build_mock.assert_called_once_with()
@mock.patch("snapcraft.plugins.catkin_tools.Compilers")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "run")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "run_output", return_value="foo")
def test_build_runs_in_bash(self, run_output_mock, run_mock, compilers_mock):
plugin = catkin_tools.CatkinToolsPlugin(
"test-part", self.properties, self.project_options
)
os.makedirs(os.path.join(plugin.sourcedir, "src"))
plugin.build()
run_mock.assert_has_calls(
[mock.call(["/bin/bash", mock.ANY], cwd=mock.ANY, env=mock.ANY)]
)
@mock.patch("snapcraft.plugins.catkin.Compilers")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "run")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "_run_in_bash")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "run_output", return_value="foo")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "_prepare_build")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "_finish_build")
def test_build(
self,
finish_build_mock,
prepare_build_mock,
run_output_mock,
bashrun_mock,
run_mock,
compilers_mock,
):
plugin = catkin_tools.CatkinToolsPlugin(
"test-part", self.properties, self.project_options
)
os.makedirs(os.path.join(plugin.sourcedir, "src"))
plugin.build()
prepare_build_mock.assert_called_once_with()
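        # Matcher object: equality comparison verifies the bash invocation is a
        # "catkin ... build ... my_package" command without pinning exact args.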
class check_build_command:
def __eq__(self, args):
command = " ".join(args)
return (
args[0] == "catkin"
and "build" in command
and "my_package" in command
)
bashrun_mock.assert_called_with(check_build_command(), env=mock.ANY)
finish_build_mock.assert_called_once_with()
class PrepareBuildTestCase(CatkinToolsPluginBaseTestCase):
scenarios = [
(
"release without catkin-cmake-args",
{"build_attributes": [], "catkin_cmake_args": []},
),
(
"release with catkin-cmake-args",
{"build_attributes": [], "catkin_cmake_args": ["-DFOO"]},
),
(
"debug without catkin-cmake-args",
{"build_attributes": ["debug"], "catkin_cmake_args": []},
),
(
"debug with catkin-cmake-args",
{"build_attributes": ["debug"], "catkin_cmake_args": ["-DFOO"]},
),
]
def setUp(self):
super().setUp()
self.properties.build_attributes.extend(self.build_attributes)
self.properties.catkin_cmake_args = self.catkin_cmake_args
@mock.patch("snapcraft.plugins.catkin_tools.Compilers")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "_run_in_bash")
@mock.patch.object(catkin_tools.CatkinToolsPlugin, "_use_in_snap_python")
def test_prepare_build(self, use_python_mock, bashrun_mock, compilers_mock):
plugin = catkin_tools.CatkinToolsPlugin(
"test-part", self.properties, self.project_options
)
os.makedirs(os.path.join(plugin.rosdir, "test"))
plugin._prepare_build()
build_attributes = self.build_attributes
catkin_cmake_args = self.catkin_cmake_args
self.assertTrue(use_python_mock.called)
confArgs = bashrun_mock.mock_calls[0][1][0]
command = " ".join(confArgs)
self.assertThat(command, Contains("catkin init"))
confArgs = bashrun_mock.mock_calls[1][1][0]
command = " ".join(confArgs)
self.assertThat(command, Contains("catkin clean -y"))
confArgs = bashrun_mock.mock_calls[2][1][0]
command = " ".join(confArgs)
self.assertThat(command, Contains("catkin profile add -f default"))
confArgs = bashrun_mock.mock_calls[3][1][0]
self.assertThat(confArgs[0], Equals("catkin"))
self.assertThat(confArgs[1], Equals("config"))
command = " ".join(confArgs)
self.assertThat(command, Contains("--profile default"))
self.assertThat(command, Contains("--install"))
self.assertThat(command, Contains("--build-space {}".format(plugin.builddir)))
self.assertThat(
command,
Contains(
"--source-space {}".format(
os.path.join(plugin.builddir, plugin.options.source_space)
)
),
)
self.assertThat(command, Contains("--install-space {}".format(plugin.rosdir)))
expected_args = " ".join(self.catkin_cmake_args)
if catkin_cmake_args:
self.assertRegexpMatches(
command, ".*--cmake-args.*{}".format(re.escape(expected_args))
)
if "debug" in build_attributes:
self.assertRegexpMatches(
command, ".*--cmake-args.*-DCMAKE_BUILD_TYPE=Debug"
)
else:
self.assertRegexpMatches(
command, ".*--cmake-args.*-DCMAKE_BUILD_TYPE=Release"
)
|
gpl-3.0
|
aaltay/beam
|
sdks/python/apache_beam/runners/worker/bundle_processor.py
|
1
|
71716
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import bisect
import collections
import copy
import json
import logging
import random
import threading
from builtins import next
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Container
from typing import DefaultDict
from typing import Dict
from typing import FrozenSet
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from future.utils import itervalues
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
import apache_beam as beam
from apache_beam import coders
from apache_beam.coders import WindowedValueCoder
from apache_beam.coders import coder_impl
from apache_beam.internal import pickler
from apache_beam.io import iobase
from apache_beam.metrics import monitoring_infos
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import common
from apache_beam.runners import pipeline_context
from apache_beam.runners.worker import operation_specs
from apache_beam.runners.worker import operations
from apache_beam.runners.worker import statesampler
from apache_beam.transforms import TimeDomain
from apache_beam.transforms import core
from apache_beam.transforms import sideinputs
from apache_beam.transforms import userstate
from apache_beam.transforms import window
from apache_beam.utils import counters
from apache_beam.utils import proto_utils
from apache_beam.utils import timestamp
if TYPE_CHECKING:
from google.protobuf import message # pylint: disable=ungrouped-imports
from apache_beam import pvalue
from apache_beam.portability.api import metrics_pb2
from apache_beam.runners.sdf_utils import SplitResultPrimary
from apache_beam.runners.sdf_utils import SplitResultResidual
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.transforms.core import Windowing
from apache_beam.transforms.window import BoundedWindow
from apache_beam.utils import windowed_value
# This module is experimental. No backwards-compatibility guarantees.
T = TypeVar('T')
ConstructorFn = Callable[[
'BeamTransformFactory',
Any,
beam_runner_api_pb2.PTransform,
Union['message.Message', bytes],
Dict[str, List[operations.Operation]]
],
operations.Operation]
OperationT = TypeVar('OperationT', bound=operations.Operation)
FnApiUserRuntimeStateTypes = Union['ReadModifyWriteRuntimeState',
'CombiningValueRuntimeState',
'SynchronousSetRuntimeState',
'SynchronousBagRuntimeState']
DATA_INPUT_URN = 'beam:runner:source:v1'
DATA_OUTPUT_URN = 'beam:runner:sink:v1'
IDENTITY_DOFN_URN = 'beam:dofn:identity:0.1'
# TODO(vikasrk): Fix this once runner sends appropriate common_urns.
OLD_DATAFLOW_RUNNER_HARNESS_PARDO_URN = 'beam:dofn:javasdk:0.1'
OLD_DATAFLOW_RUNNER_HARNESS_READ_URN = 'beam:source:java:0.1'
URNS_NEEDING_PCOLLECTIONS = set([
monitoring_infos.ELEMENT_COUNT_URN, monitoring_infos.SAMPLED_BYTE_SIZE_URN
])
_LOGGER = logging.getLogger(__name__)
class RunnerIOOperation(operations.Operation):
"""Common baseclass for runner harness IO operations."""
def __init__(self,
name_context, # type: Union[str, common.NameContext]
step_name, # type: Any
consumers, # type: Mapping[Any, Iterable[operations.Operation]]
counter_factory, # type: counters.CounterFactory
state_sampler, # type: statesampler.StateSampler
windowed_coder, # type: coders.Coder
transform_id, # type: str
data_channel # type: data_plane.DataChannel
):
# type: (...) -> None
super(RunnerIOOperation,
self).__init__(name_context, None, counter_factory, state_sampler)
self.windowed_coder = windowed_coder
self.windowed_coder_impl = windowed_coder.get_impl()
# transform_id represents the consumer for the bytes in the data plane for a
# DataInputOperation or a producer of these bytes for a DataOutputOperation.
self.transform_id = transform_id
self.data_channel = data_channel
for _, consumer_ops in consumers.items():
for consumer in consumer_ops:
self.add_receiver(consumer, 0)
class DataOutputOperation(RunnerIOOperation):
"""A sink-like operation that gathers outputs to be sent back to the runner.
"""
def set_output_stream(self, output_stream):
# type: (data_plane.ClosableOutputStream) -> None
self.output_stream = output_stream
def process(self, windowed_value):
# type: (windowed_value.WindowedValue) -> None
self.windowed_coder_impl.encode_to_stream(
windowed_value, self.output_stream, True)
self.output_stream.maybe_flush()
def finish(self):
# type: () -> None
self.output_stream.close()
super(DataOutputOperation, self).finish()
class DataInputOperation(RunnerIOOperation):
"""A source-like operation that gathers input from the runner."""
def __init__(self,
operation_name, # type: Union[str, common.NameContext]
step_name,
consumers, # type: Mapping[Any, Iterable[operations.Operation]]
counter_factory, # type: counters.CounterFactory
state_sampler, # type: statesampler.StateSampler
windowed_coder, # type: coders.Coder
transform_id,
data_channel # type: data_plane.GrpcClientDataChannel
):
# type: (...) -> None
super(DataInputOperation, self).__init__(
operation_name,
step_name,
consumers,
counter_factory,
state_sampler,
windowed_coder,
transform_id=transform_id,
data_channel=data_channel)
# We must do this manually as we don't have a spec or spec.output_coders.
self.receivers = [
operations.ConsumerSet.create(
self.counter_factory,
self.name_context.step_name,
0,
next(iter(itervalues(consumers))),
self.windowed_coder,
self._get_runtime_performance_hints())
]
self.splitting_lock = threading.Lock()
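    # index tracks the last element handed downstream; stop is the (exclusive)
    # index at which this input should cease, updated by try_split().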
self.index = -1
self.stop = float('inf')
self.started = False
def start(self):
# type: () -> None
super(DataInputOperation, self).start()
with self.splitting_lock:
self.started = True
def process(self, windowed_value):
# type: (windowed_value.WindowedValue) -> None
self.output(windowed_value)
def process_encoded(self, encoded_windowed_values):
# type: (bytes) -> None
input_stream = coder_impl.create_InputStream(encoded_windowed_values)
while input_stream.size() > 0:
with self.splitting_lock:
if self.index == self.stop - 1:
return
self.index += 1
decoded_value = self.windowed_coder_impl.decode_from_stream(
input_stream, True)
self.output(decoded_value)
def monitoring_infos(self, transform_id, tag_to_pcollection_id):
# type: (str, Dict[str, str]) -> Dict[FrozenSet, metrics_pb2.MonitoringInfo]
all_monitoring_infos = super(DataInputOperation, self).monitoring_infos(
transform_id, tag_to_pcollection_id)
read_progress_info = monitoring_infos.int64_counter(
monitoring_infos.DATA_CHANNEL_READ_INDEX,
self.index,
ptransform=transform_id)
all_monitoring_infos[monitoring_infos.to_key(
read_progress_info)] = read_progress_info
return all_monitoring_infos
# TODO(BEAM-7746): typing not compatible with super type
def try_split( # type: ignore[override]
self, fraction_of_remainder, total_buffer_size, allowed_split_points):
# type: (...) -> Optional[Tuple[int, Iterable[operations.SdfSplitResultsPrimary], Iterable[operations.SdfSplitResultsResidual], int]]
with self.splitting_lock:
if not self.started:
return None
if self.index == -1:
# We are "finished" with the (non-existent) previous element.
current_element_progress = 1.0
else:
current_element_progress_object = (
self.receivers[0].current_element_progress())
if current_element_progress_object is None:
current_element_progress = 0.5
else:
current_element_progress = (
current_element_progress_object.fraction_completed)
# Now figure out where to split.
split = self._compute_split(
self.index,
current_element_progress,
self.stop,
fraction_of_remainder,
total_buffer_size,
allowed_split_points,
self.receivers[0].try_split)
if split:
self.stop = split[-1]
return split
@staticmethod
def _compute_split(
index,
current_element_progress,
stop,
fraction_of_remainder,
total_buffer_size,
allowed_split_points=(),
try_split=lambda fraction: None):
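    # Splitting works in units of (possibly fractional) elements: compute the
    # remaining work (buffer size minus progress so far), keep the requested
    # fraction of it, and either split within the current element via
    # try_split() or round to the nearest allowed element boundary.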
def is_valid_split_point(index):
return not allowed_split_points or index in allowed_split_points
if total_buffer_size < index + 1:
total_buffer_size = index + 1
elif total_buffer_size > stop:
total_buffer_size = stop
# The units here (except for keep_of_element_remainder) are all in
# terms of number of (possibly fractional) elements.
remainder = total_buffer_size - index - current_element_progress
keep = remainder * fraction_of_remainder
if current_element_progress < 1:
keep_of_element_remainder = keep / (1 - current_element_progress)
# If it's less than what's left of the current element,
# try splitting at the current element.
if (keep_of_element_remainder < 1 and is_valid_split_point(index) and
is_valid_split_point(index + 1)):
split = try_split(
keep_of_element_remainder
) # type: Optional[Tuple[Iterable[operations.SdfSplitResultsPrimary], Iterable[operations.SdfSplitResultsResidual]]]
if split:
element_primaries, element_residuals = split
return index - 1, element_primaries, element_residuals, index + 1
# Otherwise, split at the closest element boundary.
# pylint: disable=round-builtin
stop_index = index + max(1, int(round(current_element_progress + keep)))
if allowed_split_points and stop_index not in allowed_split_points:
# Choose the closest allowed split point.
allowed_split_points = sorted(allowed_split_points)
closest = bisect.bisect(allowed_split_points, stop_index)
if closest == 0:
stop_index = allowed_split_points[0]
elif closest == len(allowed_split_points):
stop_index = allowed_split_points[-1]
else:
        prev = allowed_split_points[closest - 1]
        following = allowed_split_points[closest]
        if index < prev and stop_index - prev < following - stop_index:
          stop_index = prev
        else:
          stop_index = following
if index < stop_index < stop:
return stop_index - 1, [], [], stop_index
else:
return None
def finish(self):
# type: () -> None
with self.splitting_lock:
self.index += 1
self.started = False
def reset(self):
# type: () -> None
self.index = -1
self.stop = float('inf')
super(DataInputOperation, self).reset()
class _StateBackedIterable(object):
def __init__(self,
state_handler, # type: sdk_worker.CachingStateHandler
state_key, # type: beam_fn_api_pb2.StateKey
coder_or_impl, # type: Union[coders.Coder, coder_impl.CoderImpl]
):
# type: (...) -> None
self._state_handler = state_handler
self._state_key = state_key
if isinstance(coder_or_impl, coders.Coder):
self._coder_impl = coder_or_impl.get_impl()
else:
self._coder_impl = coder_or_impl
def __iter__(self):
# type: () -> Iterator[Any]
return iter(
self._state_handler.blocking_get(self._state_key, self._coder_impl))
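  # Pickling materializes the lazy state-backed view into a plain list, since
  # the state handler and state key cannot themselves be serialized.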
def __reduce__(self):
return list, (list(self), )
coder_impl.FastPrimitivesCoderImpl.register_iterable_like_type(
_StateBackedIterable)
class StateBackedSideInputMap(object):
def __init__(self,
state_handler, # type: sdk_worker.CachingStateHandler
transform_id, # type: str
tag, # type: Optional[str]
side_input_data, # type: pvalue.SideInputData
coder # type: WindowedValueCoder
):
# type: (...) -> None
self._state_handler = state_handler
self._transform_id = transform_id
self._tag = tag
self._side_input_data = side_input_data
self._element_coder = coder.wrapped_value_coder
self._target_window_coder = coder.window_coder
# TODO(robertwb): Limit the cache size.
self._cache = {} # type: Dict[BoundedWindow, Any]
def __getitem__(self, window):
target_window = self._side_input_data.window_mapping_fn(window)
if target_window not in self._cache:
state_handler = self._state_handler
access_pattern = self._side_input_data.access_pattern
if access_pattern == common_urns.side_inputs.ITERABLE.urn:
state_key = beam_fn_api_pb2.StateKey(
iterable_side_input=beam_fn_api_pb2.StateKey.IterableSideInput(
transform_id=self._transform_id,
side_input_id=self._tag,
window=self._target_window_coder.encode(target_window)))
raw_view = _StateBackedIterable(
state_handler, state_key, self._element_coder)
elif access_pattern == common_urns.side_inputs.MULTIMAP.urn:
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
transform_id=self._transform_id,
side_input_id=self._tag,
window=self._target_window_coder.encode(target_window),
key=b''))
cache = {}
key_coder_impl = self._element_coder.key_coder().get_impl()
value_coder = self._element_coder.value_coder()
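        # Lazily fetch values per key over the state API, memoizing each key's
        # iterable in the local cache above.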
class MultiMap(object):
def __getitem__(self, key):
if key not in cache:
keyed_state_key = beam_fn_api_pb2.StateKey()
keyed_state_key.CopyFrom(state_key)
keyed_state_key.multimap_side_input.key = (
key_coder_impl.encode_nested(key))
cache[key] = _StateBackedIterable(
state_handler, keyed_state_key, value_coder)
return cache[key]
def __reduce__(self):
# TODO(robertwb): Figure out how to support this.
raise TypeError(common_urns.side_inputs.MULTIMAP.urn)
raw_view = MultiMap()
else:
raise ValueError("Unknown access pattern: '%s'" % access_pattern)
self._cache[target_window] = self._side_input_data.view_fn(raw_view)
return self._cache[target_window]
def is_globally_windowed(self):
# type: () -> bool
return (
self._side_input_data.window_mapping_fn ==
sideinputs._global_window_mapping_fn)
def reset(self):
# type: () -> None
# TODO(BEAM-5428): Cross-bundle caching respecting cache tokens.
self._cache = {}
class ReadModifyWriteRuntimeState(userstate.ReadModifyWriteRuntimeState):
def __init__(self, underlying_bag_state):
self._underlying_bag_state = underlying_bag_state
def read(self): # type: () -> Any
values = list(self._underlying_bag_state.read())
if not values:
return None
return values[0]
def write(self, value): # type: (Any) -> None
self.clear()
self._underlying_bag_state.add(value)
def clear(self): # type: () -> None
self._underlying_bag_state.clear()
def commit(self): # type: () -> None
self._underlying_bag_state.commit()
class CombiningValueRuntimeState(userstate.CombiningValueRuntimeState):
def __init__(self, underlying_bag_state, combinefn):
# type: (userstate.AccumulatingRuntimeState, core.CombineFn) -> None
self._combinefn = combinefn
self._combinefn.setup()
self._underlying_bag_state = underlying_bag_state
self._finalized = False
def _read_accumulator(self, rewrite=True):
merged_accumulator = self._combinefn.merge_accumulators(
self._underlying_bag_state.read())
if rewrite:
self._underlying_bag_state.clear()
self._underlying_bag_state.add(merged_accumulator)
return merged_accumulator
def read(self):
# type: () -> Iterable[Any]
return self._combinefn.extract_output(self._read_accumulator())
def add(self, value):
# type: (Any) -> None
# Prefer blind writes, but don't let them grow unboundedly.
# This should be tuned to be much lower, but for now exercise
# both paths well.
if random.random() < 0.5:
accumulator = self._read_accumulator(False)
self._underlying_bag_state.clear()
else:
accumulator = self._combinefn.create_accumulator()
self._underlying_bag_state.add(
self._combinefn.add_input(accumulator, value))
def clear(self):
# type: () -> None
self._underlying_bag_state.clear()
def commit(self):
self._underlying_bag_state.commit()
def finalize(self):
if not self._finalized:
self._combinefn.teardown()
self._finalized = True
class _ConcatIterable(object):
"""An iterable that is the concatination of two iterables.
Unlike itertools.chain, this allows reiteration.
"""
def __init__(self, first, second):
# type: (Iterable[Any], Iterable[Any]) -> None
self.first = first
self.second = second
def __iter__(self):
# type: () -> Iterator[Any]
for elem in self.first:
yield elem
for elem in self.second:
yield elem
coder_impl.FastPrimitivesCoderImpl.register_iterable_like_type(_ConcatIterable)
class SynchronousBagRuntimeState(userstate.BagRuntimeState):
def __init__(self,
state_handler, # type: sdk_worker.CachingStateHandler
state_key, # type: beam_fn_api_pb2.StateKey
value_coder # type: coders.Coder
):
# type: (...) -> None
self._state_handler = state_handler
self._state_key = state_key
self._value_coder = value_coder
self._cleared = False
self._added_elements = [] # type: List[Any]
def read(self):
# type: () -> Iterable[Any]
return _ConcatIterable([] if self._cleared else cast(
'Iterable[Any]',
_StateBackedIterable(
self._state_handler, self._state_key, self._value_coder)),
self._added_elements)
def add(self, value):
# type: (Any) -> None
self._added_elements.append(value)
def clear(self):
# type: () -> None
self._cleared = True
self._added_elements = []
def commit(self):
# type: () -> None
to_await = None
if self._cleared:
to_await = self._state_handler.clear(self._state_key)
if self._added_elements:
to_await = self._state_handler.extend(
self._state_key, self._value_coder.get_impl(), self._added_elements)
if to_await:
# To commit, we need to wait on the last state request future to complete.
to_await.get()
class SynchronousSetRuntimeState(userstate.SetRuntimeState):
def __init__(self,
state_handler, # type: sdk_worker.CachingStateHandler
state_key, # type: beam_fn_api_pb2.StateKey
value_coder # type: coders.Coder
):
# type: (...) -> None
self._state_handler = state_handler
self._state_key = state_key
self._value_coder = value_coder
self._cleared = False
self._added_elements = set() # type: Set[Any]
def _compact_data(self, rewrite=True):
accumulator = set(
_ConcatIterable(
set() if self._cleared else _StateBackedIterable(
self._state_handler, self._state_key, self._value_coder),
self._added_elements))
if rewrite and accumulator:
self._state_handler.clear(self._state_key)
self._state_handler.extend(
self._state_key, self._value_coder.get_impl(), accumulator)
      # Since everything is already committed, we can safely reinitialize
      # added_elements here.
self._added_elements = set()
return accumulator
def read(self):
# type: () -> Set[Any]
return self._compact_data(rewrite=False)
def add(self, value):
# type: (Any) -> None
if self._cleared:
      # This is a good time to explicitly clear.
self._state_handler.clear(self._state_key)
self._cleared = False
self._added_elements.add(value)
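    # Occasionally compact: push the accumulated elements into the backing
    # state so the local set does not grow without bound.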
if random.random() > 0.5:
self._compact_data()
def clear(self):
# type: () -> None
self._cleared = True
self._added_elements = set()
def commit(self):
# type: () -> None
to_await = None
if self._cleared:
to_await = self._state_handler.clear(self._state_key)
if self._added_elements:
to_await = self._state_handler.extend(
self._state_key, self._value_coder.get_impl(), self._added_elements)
if to_await:
# To commit, we need to wait on the last state request future to complete.
to_await.get()
class OutputTimer(userstate.BaseTimer):
def __init__(self,
key,
window, # type: BoundedWindow
timestamp, # type: timestamp.Timestamp
paneinfo, # type: windowed_value.PaneInfo
time_domain, # type: str
timer_family_id, # type: str
timer_coder_impl, # type: coder_impl.TimerCoderImpl
output_stream # type: data_plane.ClosableOutputStream
):
self._key = key
self._window = window
self._input_timestamp = timestamp
self._paneinfo = paneinfo
self._time_domain = time_domain
self._timer_family_id = timer_family_id
self._output_stream = output_stream
self._timer_coder_impl = timer_coder_impl
def set(self, ts: timestamp.TimestampTypes, dynamic_timer_tag='') -> None:
ts = timestamp.Timestamp.of(ts)
timer = userstate.Timer(
user_key=self._key,
dynamic_timer_tag=dynamic_timer_tag,
windows=(self._window, ),
clear_bit=False,
fire_timestamp=ts,
hold_timestamp=ts if TimeDomain.is_event_time(self._time_domain) else
self._input_timestamp,
paneinfo=self._paneinfo)
self._timer_coder_impl.encode_to_stream(timer, self._output_stream, True)
self._output_stream.maybe_flush()
def clear(self, dynamic_timer_tag='') -> None:
timer = userstate.Timer(
user_key=self._key,
dynamic_timer_tag=dynamic_timer_tag,
windows=(self._window, ),
clear_bit=True,
fire_timestamp=None,
hold_timestamp=None,
paneinfo=None)
self._timer_coder_impl.encode_to_stream(timer, self._output_stream, True)
self._output_stream.maybe_flush()
class TimerInfo(object):
"""A data class to store information related to a timer."""
def __init__(self, timer_coder_impl, output_stream=None):
self.timer_coder_impl = timer_coder_impl
self.output_stream = output_stream
class FnApiUserStateContext(userstate.UserStateContext):
"""Interface for state and timers from SDK to Fn API servicer of state.."""
def __init__(self,
state_handler, # type: sdk_worker.CachingStateHandler
transform_id, # type: str
key_coder, # type: coders.Coder
window_coder, # type: coders.Coder
):
# type: (...) -> None
"""Initialize a ``FnApiUserStateContext``.
Args:
state_handler: A StateServicer object.
      transform_id: The name of the PTransform this context is associated with.
key_coder: Coder for the key type.
window_coder: Coder for the window type.
"""
self._state_handler = state_handler
self._transform_id = transform_id
self._key_coder = key_coder
self._window_coder = window_coder
# A mapping of {timer_family_id: TimerInfo}
self._timers_info = {} # type: Dict[str, TimerInfo]
self._all_states = {} # type: Dict[tuple, FnApiUserRuntimeStateTypes]
def add_timer_info(self, timer_family_id, timer_info):
# type: (str, TimerInfo) -> None
self._timers_info[timer_family_id] = timer_info
def get_timer(
self, timer_spec: userstate.TimerSpec, key, window, timestamp,
pane) -> OutputTimer:
assert self._timers_info[timer_spec.name].output_stream is not None
timer_coder_impl = self._timers_info[timer_spec.name].timer_coder_impl
output_stream = self._timers_info[timer_spec.name].output_stream
return OutputTimer(
key,
window,
timestamp,
pane,
timer_spec.time_domain,
timer_spec.name,
timer_coder_impl,
output_stream)
def get_state(self, *args):
# type: (*Any) -> FnApiUserRuntimeStateTypes
state_handle = self._all_states.get(args)
if state_handle is None:
state_handle = self._all_states[args] = self._create_state(*args)
return state_handle
def _create_state(self,
state_spec, # type: userstate.StateSpec
key,
window # type: BoundedWindow
):
# type: (...) -> FnApiUserRuntimeStateTypes
if isinstance(state_spec,
(userstate.BagStateSpec,
userstate.CombiningValueStateSpec,
userstate.ReadModifyWriteStateSpec)):
bag_state = SynchronousBagRuntimeState(
self._state_handler,
state_key=beam_fn_api_pb2.StateKey(
bag_user_state=beam_fn_api_pb2.StateKey.BagUserState(
transform_id=self._transform_id,
user_state_id=state_spec.name,
window=self._window_coder.encode(window),
# State keys are expected in nested encoding format
key=self._key_coder.encode_nested(key))),
value_coder=state_spec.coder)
if isinstance(state_spec, userstate.BagStateSpec):
return bag_state
elif isinstance(state_spec, userstate.ReadModifyWriteStateSpec):
return ReadModifyWriteRuntimeState(bag_state)
else:
return CombiningValueRuntimeState(
bag_state, copy.deepcopy(state_spec.combine_fn))
elif isinstance(state_spec, userstate.SetStateSpec):
return SynchronousSetRuntimeState(
self._state_handler,
state_key=beam_fn_api_pb2.StateKey(
bag_user_state=beam_fn_api_pb2.StateKey.BagUserState(
transform_id=self._transform_id,
user_state_id=state_spec.name,
window=self._window_coder.encode(window),
# State keys are expected in nested encoding format
key=self._key_coder.encode_nested(key))),
value_coder=state_spec.coder)
else:
raise NotImplementedError(state_spec)
def commit(self):
# type: () -> None
for state in self._all_states.values():
state.commit()
def reset(self):
# type: () -> None
for state in self._all_states.values():
state.finalize()
self._all_states = {}
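# Unbounded memoization keyed on positional args; used below when building the
# operation graph so each transform's operation is constructed exactly once.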
def memoize(func):
cache = {}
missing = object()
def wrapper(*args):
result = cache.get(args, missing)
if result is missing:
result = cache[args] = func(*args)
return result
return wrapper
def only_element(iterable):
# type: (Iterable[T]) -> T
element, = iterable
return element
class BundleProcessor(object):
""" A class for processing bundles of elements. """
def __init__(self,
process_bundle_descriptor, # type: beam_fn_api_pb2.ProcessBundleDescriptor
state_handler, # type: sdk_worker.CachingStateHandler
data_channel_factory # type: data_plane.DataChannelFactory
):
# type: (...) -> None
"""Initialize a bundle processor.
Args:
process_bundle_descriptor (``beam_fn_api_pb2.ProcessBundleDescriptor``):
        a description of the stage that this ``BundleProcessor`` is to execute.
      state_handler (CachingStateHandler): the handler used to service user
        state and side input requests while processing the bundle.
      data_channel_factory (``data_plane.DataChannelFactory``): the factory
        used to create data channels for reading and writing elements.
"""
self.process_bundle_descriptor = process_bundle_descriptor
self.state_handler = state_handler
self.data_channel_factory = data_channel_factory
    # There is no guarantee that the runner only sets
    # timer_api_service_descriptor when there are timers, so this field cannot
    # be used as an indicator of timers.
if self.process_bundle_descriptor.timer_api_service_descriptor.url:
self.timer_data_channel = (
data_channel_factory.create_data_channel_from_url(
self.process_bundle_descriptor.timer_api_service_descriptor.url))
else:
self.timer_data_channel = None
# A mapping of
# {(transform_id, timer_family_id): TimerInfo}
# The mapping is empty when there is no timer_family_specs in the
# ProcessBundleDescriptor.
self.timers_info = {} # type: Dict[Tuple[str, str], TimerInfo]
# TODO(robertwb): Figure out the correct prefix to use for output counters
# from StateSampler.
self.counter_factory = counters.CounterFactory()
self.state_sampler = statesampler.StateSampler(
'fnapi-step-%s' % self.process_bundle_descriptor.id,
self.counter_factory)
self.ops = self.create_execution_tree(self.process_bundle_descriptor)
for op in self.ops.values():
op.setup()
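    # Serializes try_split() requests against bundle completion: process_bundle
    # waits on this lock in its finally block so in-flight splits can finish.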
self.splitting_lock = threading.Lock()
def create_execution_tree(
self,
descriptor # type: beam_fn_api_pb2.ProcessBundleDescriptor
):
# type: (...) -> collections.OrderedDict[str, operations.DoOperation]
transform_factory = BeamTransformFactory(
descriptor,
self.data_channel_factory,
self.counter_factory,
self.state_sampler,
self.state_handler)
self.timers_info = transform_factory.extract_timers_info()
def is_side_input(transform_proto, tag):
if transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn:
return tag in proto_utils.parse_Bytes(
transform_proto.spec.payload,
beam_runner_api_pb2.ParDoPayload).side_inputs
pcoll_consumers = collections.defaultdict(
list) # type: DefaultDict[str, List[str]]
for transform_id, transform_proto in descriptor.transforms.items():
for tag, pcoll_id in transform_proto.inputs.items():
if not is_side_input(transform_proto, tag):
pcoll_consumers[pcoll_id].append(transform_id)
@memoize
def get_operation(transform_id):
# type: (str) -> operations.Operation
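      # Build downstream (consumer) operations first so this operation can be
      # wired directly to its consumers' receivers.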
transform_consumers = {
tag: [get_operation(op) for op in pcoll_consumers[pcoll_id]]
for tag,
pcoll_id in descriptor.transforms[transform_id].outputs.items()
}
return transform_factory.create_operation(
transform_id, transform_consumers)
# Operations must be started (hence returned) in order.
@memoize
def topological_height(transform_id):
# type: (str) -> int
return 1 + max([0] + [
topological_height(consumer)
for pcoll in descriptor.transforms[transform_id].outputs.values()
for consumer in pcoll_consumers[pcoll]
])
return collections.OrderedDict([(
transform_id,
cast(operations.DoOperation,
get_operation(transform_id))) for transform_id in sorted(
descriptor.transforms, key=topological_height, reverse=True)])
def reset(self):
# type: () -> None
self.counter_factory.reset()
self.state_sampler.reset()
# Side input caches.
for op in self.ops.values():
op.reset()
def process_bundle(self, instruction_id):
# type: (str) -> Tuple[List[beam_fn_api_pb2.DelayedBundleApplication], bool]
expected_input_ops = [] # type: List[DataInputOperation]
for op in self.ops.values():
if isinstance(op, DataOutputOperation):
# TODO(robertwb): Is there a better way to pass the instruction id to
# the operation?
op.set_output_stream(
op.data_channel.output_stream(instruction_id, op.transform_id))
elif isinstance(op, DataInputOperation):
# We must wait until we receive "end of stream" for each of these ops.
expected_input_ops.append(op)
try:
execution_context = ExecutionContext()
self.state_sampler.start()
# Start all operations.
for op in reversed(self.ops.values()):
_LOGGER.debug('start %s', op)
op.execution_context = execution_context
op.start()
      # Each data_channel is mapped to a list of expected inputs which includes
      # both data input and timer input. A data input is identified by
      # transform_id; a timer input is identified by
      # (transform_id, timer_family_id).
data_channels = collections.defaultdict(
list
) # type: DefaultDict[data_plane.GrpcClientDataChannel, List[Union[str, Tuple[str, str]]]]
# Add expected data inputs for each data channel.
input_op_by_transform_id = {}
for input_op in expected_input_ops:
data_channels[input_op.data_channel].append(input_op.transform_id)
input_op_by_transform_id[input_op.transform_id] = input_op
# Update timer_data channel with expected timer inputs.
if self.timer_data_channel:
data_channels[self.timer_data_channel].extend(
list(self.timers_info.keys()))
# Set up timer output stream for DoOperation.
for ((transform_id, timer_family_id),
timer_info) in self.timers_info.items():
output_stream = self.timer_data_channel.output_timer_stream(
instruction_id, transform_id, timer_family_id)
timer_info.output_stream = output_stream
self.ops[transform_id].add_timer_info(timer_family_id, timer_info)
# Process data and timer inputs
for data_channel, expected_inputs in data_channels.items():
for element in data_channel.input_elements(instruction_id,
expected_inputs):
if isinstance(element, beam_fn_api_pb2.Elements.Timers):
timer_coder_impl = (
self.timers_info[(
element.transform_id,
element.timer_family_id)].timer_coder_impl)
for timer_data in timer_coder_impl.decode_all(element.timers):
self.ops[element.transform_id].process_timer(
element.timer_family_id, timer_data)
elif isinstance(element, beam_fn_api_pb2.Elements.Data):
input_op_by_transform_id[element.transform_id].process_encoded(
element.data)
# Finish all operations.
for op in self.ops.values():
_LOGGER.debug('finish %s', op)
op.finish()
# Close every timer output stream
for timer_info in self.timers_info.values():
assert timer_info.output_stream is not None
timer_info.output_stream.close()
return ([
self.delayed_bundle_application(op, residual) for op,
residual in execution_context.delayed_applications
],
self.requires_finalization())
finally:
# Ensure any in-flight split attempts complete.
with self.splitting_lock:
pass
self.state_sampler.stop_if_still_running()
def finalize_bundle(self):
# type: () -> beam_fn_api_pb2.FinalizeBundleResponse
for op in self.ops.values():
op.finalize_bundle()
return beam_fn_api_pb2.FinalizeBundleResponse()
def requires_finalization(self):
# type: () -> bool
return any(op.needs_finalization() for op in self.ops.values())
def try_split(self, bundle_split_request):
# type: (beam_fn_api_pb2.ProcessBundleSplitRequest) -> beam_fn_api_pb2.ProcessBundleSplitResponse
split_response = beam_fn_api_pb2.ProcessBundleSplitResponse()
with self.splitting_lock:
for op in self.ops.values():
if isinstance(op, DataInputOperation):
desired_split = bundle_split_request.desired_splits.get(
op.transform_id)
if desired_split:
split = op.try_split(
desired_split.fraction_of_remainder,
desired_split.estimated_input_elements,
desired_split.allowed_split_points)
if split:
(
primary_end,
element_primaries,
element_residuals,
residual_start,
) = split
for element_primary in element_primaries:
split_response.primary_roots.add().CopyFrom(
self.bundle_application(*element_primary))
for element_residual in element_residuals:
split_response.residual_roots.add().CopyFrom(
self.delayed_bundle_application(*element_residual))
split_response.channel_splits.extend([
beam_fn_api_pb2.ProcessBundleSplitResponse.ChannelSplit(
transform_id=op.transform_id,
last_primary_element=primary_end,
first_residual_element=residual_start)
])
return split_response
def delayed_bundle_application(self,
op, # type: operations.DoOperation
deferred_remainder # type: SplitResultResidual
):
# type: (...) -> beam_fn_api_pb2.DelayedBundleApplication
assert op.input_info is not None
# TODO(SDF): For non-root nodes, need main_input_coder + residual_coder.
(element_and_restriction, current_watermark, deferred_timestamp) = (
deferred_remainder)
if deferred_timestamp:
assert isinstance(deferred_timestamp, timestamp.Duration)
proto_deferred_watermark = proto_utils.from_micros(
duration_pb2.Duration,
deferred_timestamp.micros) # type: Optional[duration_pb2.Duration]
else:
proto_deferred_watermark = None
return beam_fn_api_pb2.DelayedBundleApplication(
requested_time_delay=proto_deferred_watermark,
application=self.construct_bundle_application(
op.input_info, current_watermark, element_and_restriction))
def bundle_application(self,
op, # type: operations.DoOperation
primary # type: SplitResultPrimary
):
# type: (...) -> beam_fn_api_pb2.BundleApplication
assert op.input_info is not None
return self.construct_bundle_application(
op.input_info, None, primary.primary_value)
def construct_bundle_application(self,
op_input_info, # type: operations.OpInputInfo
output_watermark, # type: Optional[timestamp.Timestamp]
element
):
# type: (...) -> beam_fn_api_pb2.BundleApplication
transform_id, main_input_tag, main_input_coder, outputs = op_input_info
if output_watermark:
proto_output_watermark = proto_utils.from_micros(
timestamp_pb2.Timestamp, output_watermark.micros)
output_watermarks = {
output: proto_output_watermark
for output in outputs
} # type: Optional[Dict[str, timestamp_pb2.Timestamp]]
else:
output_watermarks = None
return beam_fn_api_pb2.BundleApplication(
transform_id=transform_id,
input_id=main_input_tag,
output_watermarks=output_watermarks,
element=main_input_coder.get_impl().encode_nested(element))
def monitoring_infos(self):
# type: () -> List[metrics_pb2.MonitoringInfo]
"""Returns the list of MonitoringInfos collected processing this bundle."""
# Construct a new dict first to remove duplicates.
all_monitoring_infos_dict = {}
for transform_id, op in self.ops.items():
tag_to_pcollection_id = self.process_bundle_descriptor.transforms[
transform_id].outputs
all_monitoring_infos_dict.update(
op.monitoring_infos(transform_id, dict(tag_to_pcollection_id)))
return list(all_monitoring_infos_dict.values())
def shutdown(self):
# type: () -> None
for op in self.ops.values():
op.teardown()
class ExecutionContext(object):
def __init__(self):
self.delayed_applications = [
] # type: List[Tuple[operations.DoOperation, common.SplitResultResidual]]
class BeamTransformFactory(object):
"""Factory for turning transform_protos into executable operations."""
def __init__(self,
descriptor, # type: beam_fn_api_pb2.ProcessBundleDescriptor
data_channel_factory, # type: data_plane.DataChannelFactory
counter_factory, # type: counters.CounterFactory
state_sampler, # type: statesampler.StateSampler
state_handler # type: sdk_worker.CachingStateHandler
):
self.descriptor = descriptor
self.data_channel_factory = data_channel_factory
self.counter_factory = counter_factory
self.state_sampler = state_sampler
self.state_handler = state_handler
self.context = pipeline_context.PipelineContext(
descriptor,
iterable_state_read=lambda token,
element_coder_impl: _StateBackedIterable(
state_handler,
beam_fn_api_pb2.StateKey(
runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
element_coder_impl))
_known_urns = {
} # type: Dict[str, Tuple[ConstructorFn, Union[Type[message.Message], Type[bytes], None]]]
@classmethod
def register_urn(
cls,
urn, # type: str
parameter_type # type: Optional[Type[T]]
):
# type: (...) -> Callable[[Callable[[BeamTransformFactory, str, beam_runner_api_pb2.PTransform, T, Dict[str, List[operations.Operation]]], operations.Operation]], Callable[[BeamTransformFactory, str, beam_runner_api_pb2.PTransform, T, Dict[str, List[operations.Operation]]], operations.Operation]]
def wrapper(func):
cls._known_urns[urn] = func, parameter_type
return func
return wrapper
def create_operation(self,
transform_id, # type: str
consumers # type: Dict[str, List[operations.Operation]]
):
# type: (...) -> operations.Operation
transform_proto = self.descriptor.transforms[transform_id]
if not transform_proto.unique_name:
_LOGGER.debug("No unique name set for transform %s" % transform_id)
transform_proto.unique_name = transform_id
creator, parameter_type = self._known_urns[transform_proto.spec.urn]
payload = proto_utils.parse_Bytes(
transform_proto.spec.payload, parameter_type)
return creator(self, transform_id, transform_proto, payload, consumers)
def extract_timers_info(self):
# type: () -> Dict[Tuple[str, str], TimerInfo]
timers_info = {}
for transform_id, transform_proto in self.descriptor.transforms.items():
if transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn:
pardo_payload = proto_utils.parse_Bytes(
transform_proto.spec.payload, beam_runner_api_pb2.ParDoPayload)
for (timer_family_id,
timer_family_spec) in pardo_payload.timer_family_specs.items():
timer_coder_impl = self.get_coder(
timer_family_spec.timer_family_coder_id).get_impl()
# The output_stream should be updated when processing a bundle.
timers_info[(transform_id, timer_family_id)] = TimerInfo(
timer_coder_impl=timer_coder_impl)
return timers_info
def get_coder(self, coder_id):
# type: (str) -> coders.Coder
if coder_id not in self.descriptor.coders:
raise KeyError("No such coder: %s" % coder_id)
coder_proto = self.descriptor.coders[coder_id]
if coder_proto.spec.urn:
return self.context.coders.get_by_id(coder_id)
else:
# No URN, assume cloud object encoding json bytes.
return operation_specs.get_coder_from_spec(
json.loads(coder_proto.spec.payload.decode('utf-8')))
def get_windowed_coder(self, pcoll_id):
# type: (str) -> WindowedValueCoder
coder = self.get_coder(self.descriptor.pcollections[pcoll_id].coder_id)
# TODO(robertwb): Remove this condition once all runners are consistent.
if not isinstance(coder, WindowedValueCoder):
windowing_strategy = self.descriptor.windowing_strategies[
self.descriptor.pcollections[pcoll_id].windowing_strategy_id]
return WindowedValueCoder(
coder, self.get_coder(windowing_strategy.window_coder_id))
else:
return coder
def get_output_coders(self, transform_proto):
# type: (beam_runner_api_pb2.PTransform) -> Dict[str, coders.Coder]
return {
tag: self.get_windowed_coder(pcoll_id)
for tag,
pcoll_id in transform_proto.outputs.items()
}
def get_only_output_coder(self, transform_proto):
# type: (beam_runner_api_pb2.PTransform) -> coders.Coder
return only_element(self.get_output_coders(transform_proto).values())
def get_input_coders(self, transform_proto):
# type: (beam_runner_api_pb2.PTransform) -> Dict[str, coders.WindowedValueCoder]
return {
tag: self.get_windowed_coder(pcoll_id)
for tag,
pcoll_id in transform_proto.inputs.items()
}
def get_only_input_coder(self, transform_proto):
# type: (beam_runner_api_pb2.PTransform) -> coders.Coder
return only_element(list(self.get_input_coders(transform_proto).values()))
def get_input_windowing(self, transform_proto):
# type: (beam_runner_api_pb2.PTransform) -> Windowing
pcoll_id = only_element(transform_proto.inputs.values())
windowing_strategy_id = self.descriptor.pcollections[
pcoll_id].windowing_strategy_id
return self.context.windowing_strategies.get_by_id(windowing_strategy_id)
# TODO(robertwb): Update all operations to take these in the constructor.
@staticmethod
def augment_oldstyle_op(
op, # type: OperationT
step_name, # type: str
consumers, # type: Mapping[str, Iterable[operations.Operation]]
tag_list=None # type: Optional[List[str]]
):
# type: (...) -> OperationT
op.step_name = step_name
for tag, op_consumers in consumers.items():
for consumer in op_consumers:
op.add_receiver(consumer, tag_list.index(tag) if tag_list else 0)
return op
@BeamTransformFactory.register_urn(
DATA_INPUT_URN, beam_fn_api_pb2.RemoteGrpcPort)
def create_source_runner(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
grpc_port, # type: beam_fn_api_pb2.RemoteGrpcPort
consumers # type: Dict[str, List[operations.Operation]]
):
# type: (...) -> DataInputOperation
output_coder = factory.get_coder(grpc_port.coder_id)
return DataInputOperation(
common.NameContext(transform_proto.unique_name, transform_id),
transform_proto.unique_name,
consumers,
factory.counter_factory,
factory.state_sampler,
output_coder,
transform_id=transform_id,
data_channel=factory.data_channel_factory.create_data_channel(grpc_port))
@BeamTransformFactory.register_urn(
DATA_OUTPUT_URN, beam_fn_api_pb2.RemoteGrpcPort)
def create_sink_runner(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
grpc_port, # type: beam_fn_api_pb2.RemoteGrpcPort
consumers # type: Dict[str, List[operations.Operation]]
):
# type: (...) -> DataOutputOperation
output_coder = factory.get_coder(grpc_port.coder_id)
return DataOutputOperation(
common.NameContext(transform_proto.unique_name, transform_id),
transform_proto.unique_name,
consumers,
factory.counter_factory,
factory.state_sampler,
output_coder,
transform_id=transform_id,
data_channel=factory.data_channel_factory.create_data_channel(grpc_port))
@BeamTransformFactory.register_urn(OLD_DATAFLOW_RUNNER_HARNESS_READ_URN, None)
def create_source_java(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
parameter,
consumers # type: Dict[str, List[operations.Operation]]
):
# type: (...) -> operations.ReadOperation
# The Dataflow runner harness strips the base64 encoding.
source = pickler.loads(base64.b64encode(parameter))
spec = operation_specs.WorkerRead(
iobase.SourceBundle(1.0, source, None, None),
[factory.get_only_output_coder(transform_proto)])
return factory.augment_oldstyle_op(
operations.ReadOperation(
common.NameContext(transform_proto.unique_name, transform_id),
spec,
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.deprecated_primitives.READ.urn, beam_runner_api_pb2.ReadPayload)
def create_deprecated_read(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
parameter, # type: beam_runner_api_pb2.ReadPayload
consumers # type: Dict[str, List[operations.Operation]]
):
# type: (...) -> operations.ReadOperation
source = iobase.BoundedSource.from_runner_api(
parameter.source, factory.context)
spec = operation_specs.WorkerRead(
iobase.SourceBundle(1.0, source, None, None),
[WindowedValueCoder(source.default_output_coder())])
return factory.augment_oldstyle_op(
operations.ReadOperation(
common.NameContext(transform_proto.unique_name, transform_id),
spec,
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
python_urns.IMPULSE_READ_TRANSFORM, beam_runner_api_pb2.ReadPayload)
def create_read_from_impulse_python(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
parameter, # type: beam_runner_api_pb2.ReadPayload
consumers # type: Dict[str, List[operations.Operation]]
):
# type: (...) -> operations.ImpulseReadOperation
return operations.ImpulseReadOperation(
common.NameContext(transform_proto.unique_name, transform_id),
factory.counter_factory,
factory.state_sampler,
consumers,
iobase.BoundedSource.from_runner_api(parameter.source, factory.context),
factory.get_only_output_coder(transform_proto))
@BeamTransformFactory.register_urn(OLD_DATAFLOW_RUNNER_HARNESS_PARDO_URN, None)
def create_dofn_javasdk(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
serialized_fn,
consumers # type: Dict[str, List[operations.Operation]]
):
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers, serialized_fn)
@BeamTransformFactory.register_urn(
common_urns.sdf_components.PAIR_WITH_RESTRICTION.urn,
beam_runner_api_pb2.ParDoPayload)
def create_pair_with_restriction(*args):
class PairWithRestriction(beam.DoFn):
def __init__(self, fn, restriction_provider, watermark_estimator_provider):
self.restriction_provider = restriction_provider
self.watermark_estimator_provider = watermark_estimator_provider
def process(self, element, *args, **kwargs):
# TODO(SDF): Do we want to allow mutation of the element?
# (E.g. it could be nice to shift bulky description to the portion
# that can be distributed.)
initial_restriction = self.restriction_provider.initial_restriction(
element)
initial_estimator_state = (
self.watermark_estimator_provider.initial_estimator_state(
element, initial_restriction))
yield (element, (initial_restriction, initial_estimator_state))
return _create_sdf_operation(PairWithRestriction, *args)
@BeamTransformFactory.register_urn(
common_urns.sdf_components.SPLIT_AND_SIZE_RESTRICTIONS.urn,
beam_runner_api_pb2.ParDoPayload)
def create_split_and_size_restrictions(*args):
class SplitAndSizeRestrictions(beam.DoFn):
def __init__(self, fn, restriction_provider, watermark_estimator_provider):
self.restriction_provider = restriction_provider
self.watermark_estimator_provider = watermark_estimator_provider
def process(self, element_restriction, *args, **kwargs):
element, (restriction, _) = element_restriction
for part, size in self.restriction_provider.split_and_size(
element, restriction):
estimator_state = (
self.watermark_estimator_provider.initial_estimator_state(
element, part))
yield ((element, (part, estimator_state)), size)
return _create_sdf_operation(SplitAndSizeRestrictions, *args)
@BeamTransformFactory.register_urn(
common_urns.sdf_components.TRUNCATE_SIZED_RESTRICTION.urn,
beam_runner_api_pb2.ParDoPayload)
def create_truncate_sized_restriction(*args):
class TruncateAndSizeRestriction(beam.DoFn):
def __init__(self, fn, restriction_provider, watermark_estimator_provider):
self.restriction_provider = restriction_provider
def process(self, element_restriction, *args, **kwargs):
((element, (restriction, estimator_state)), _) = element_restriction
truncated_restriction = self.restriction_provider.truncate(
element, restriction)
if truncated_restriction:
truncated_restriction_size = (
self.restriction_provider.restriction_size(
element, truncated_restriction))
yield ((element, (truncated_restriction, estimator_state)),
truncated_restriction_size)
return _create_sdf_operation(
TruncateAndSizeRestriction,
*args,
operation_cls=operations.SdfTruncateSizedRestrictions)
@BeamTransformFactory.register_urn(
common_urns.sdf_components.PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,
beam_runner_api_pb2.ParDoPayload)
def create_process_sized_elements_and_restrictions(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
parameter, # type: beam_runner_api_pb2.ParDoPayload
consumers # type: Dict[str, List[operations.Operation]]
):
return _create_pardo_operation(
factory,
transform_id,
transform_proto,
consumers,
core.DoFnInfo.from_runner_api(parameter.do_fn,
factory.context).serialized_dofn_data(),
parameter,
operation_cls=operations.SdfProcessSizedElements)
def _create_sdf_operation(
proxy_dofn,
factory,
transform_id,
transform_proto,
parameter,
consumers,
operation_cls=operations.DoOperation):
dofn_data = pickler.loads(parameter.do_fn.payload)
dofn = dofn_data[0]
restriction_provider = common.DoFnSignature(dofn).get_restriction_provider()
watermark_estimator_provider = (
common.DoFnSignature(dofn).get_watermark_estimator_provider())
serialized_fn = pickler.dumps(
(proxy_dofn(dofn, restriction_provider, watermark_estimator_provider), ) +
dofn_data[1:])
return _create_pardo_operation(
factory,
transform_id,
transform_proto,
consumers,
serialized_fn,
parameter,
operation_cls=operation_cls)
@BeamTransformFactory.register_urn(
common_urns.primitives.PAR_DO.urn, beam_runner_api_pb2.ParDoPayload)
def create_par_do(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
parameter, # type: beam_runner_api_pb2.ParDoPayload
consumers # type: Dict[str, List[operations.Operation]]
):
# type: (...) -> operations.DoOperation
return _create_pardo_operation(
factory,
transform_id,
transform_proto,
consumers,
core.DoFnInfo.from_runner_api(parameter.do_fn,
factory.context).serialized_dofn_data(),
parameter)
def _create_pardo_operation(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
consumers,
serialized_fn,
pardo_proto=None, # type: Optional[beam_runner_api_pb2.ParDoPayload]
operation_cls=operations.DoOperation
):
if pardo_proto and pardo_proto.side_inputs:
input_tags_to_coders = factory.get_input_coders(transform_proto)
tagged_side_inputs = [
(tag, beam.pvalue.SideInputData.from_runner_api(si, factory.context))
for tag,
si in pardo_proto.side_inputs.items()
]
tagged_side_inputs.sort(
key=lambda tag_si: sideinputs.get_sideinput_index(tag_si[0]))
side_input_maps = [
StateBackedSideInputMap(
factory.state_handler,
transform_id,
tag,
si,
input_tags_to_coders[tag]) for tag,
si in tagged_side_inputs
]
else:
side_input_maps = []
output_tags = list(transform_proto.outputs.keys())
dofn_data = pickler.loads(serialized_fn)
if not dofn_data[-1]:
# Windowing not set.
if pardo_proto:
other_input_tags = set.union(
set(pardo_proto.side_inputs),
set(pardo_proto.timer_family_specs)) # type: Container[str]
else:
other_input_tags = ()
pcoll_id, = [pcoll for tag, pcoll in transform_proto.inputs.items()
if tag not in other_input_tags]
windowing = factory.context.windowing_strategies.get_by_id(
factory.descriptor.pcollections[pcoll_id].windowing_strategy_id)
serialized_fn = pickler.dumps(dofn_data[:-1] + (windowing, ))
if pardo_proto and (pardo_proto.timer_family_specs or pardo_proto.state_specs
or pardo_proto.restriction_coder_id):
found_input_coder = None
for tag, pcoll_id in transform_proto.inputs.items():
if tag in pardo_proto.side_inputs:
pass
else:
# Must be the main input
assert found_input_coder is None
main_input_tag = tag
found_input_coder = factory.get_windowed_coder(pcoll_id)
assert found_input_coder is not None
main_input_coder = found_input_coder
if pardo_proto.timer_family_specs or pardo_proto.state_specs:
user_state_context = FnApiUserStateContext(
factory.state_handler,
transform_id,
main_input_coder.key_coder(),
main_input_coder.window_coder
) # type: Optional[FnApiUserStateContext]
else:
user_state_context = None
else:
user_state_context = None
output_coders = factory.get_output_coders(transform_proto)
spec = operation_specs.WorkerDoFn(
serialized_fn=serialized_fn,
output_tags=output_tags,
input=None,
side_inputs=None, # Fn API uses proto definitions and the Fn State API
output_coders=[output_coders[tag] for tag in output_tags])
result = factory.augment_oldstyle_op(
operation_cls(
common.NameContext(transform_proto.unique_name, transform_id),
spec,
factory.counter_factory,
factory.state_sampler,
side_input_maps,
user_state_context),
transform_proto.unique_name,
consumers,
output_tags)
if pardo_proto and pardo_proto.restriction_coder_id:
result.input_info = operations.OpInputInfo(
transform_id,
main_input_tag,
main_input_coder,
transform_proto.outputs.keys())
return result
def _create_simple_pardo_operation(factory, # type: BeamTransformFactory
transform_id,
transform_proto,
consumers,
dofn, # type: beam.DoFn
):
serialized_fn = pickler.dumps((dofn, (), {}, [], None))
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers, serialized_fn)
@BeamTransformFactory.register_urn(
common_urns.primitives.ASSIGN_WINDOWS.urn,
beam_runner_api_pb2.WindowingStrategy)
def create_assign_windows(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
parameter, # type: beam_runner_api_pb2.WindowingStrategy
consumers # type: Dict[str, List[operations.Operation]]
):
class WindowIntoDoFn(beam.DoFn):
def __init__(self, windowing):
self.windowing = windowing
def process(
self,
element,
timestamp=beam.DoFn.TimestampParam,
window=beam.DoFn.WindowParam):
new_windows = self.windowing.windowfn.assign(
WindowFn.AssignContext(timestamp, element=element, window=window))
yield WindowedValue(element, timestamp, new_windows)
from apache_beam.transforms.core import Windowing
from apache_beam.transforms.window import WindowFn, WindowedValue
windowing = Windowing.from_runner_api(parameter, factory.context)
return _create_simple_pardo_operation(
factory,
transform_id,
transform_proto,
consumers,
WindowIntoDoFn(windowing))
@BeamTransformFactory.register_urn(IDENTITY_DOFN_URN, None)
def create_identity_dofn(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
parameter,
consumers # type: Dict[str, List[operations.Operation]]
):
# type: (...) -> operations.FlattenOperation
return factory.augment_oldstyle_op(
operations.FlattenOperation(
common.NameContext(transform_proto.unique_name, transform_id),
operation_specs.WorkerFlatten(
None, [factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PER_KEY_PRECOMBINE.urn,
beam_runner_api_pb2.CombinePayload)
def create_combine_per_key_precombine(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
payload, # type: beam_runner_api_pb2.CombinePayload
consumers # type: Dict[str, List[operations.Operation]]
):
# type: (...) -> operations.PGBKCVOperation
serialized_combine_fn = pickler.dumps((
beam.CombineFn.from_runner_api(payload.combine_fn,
factory.context), [], {}))
return factory.augment_oldstyle_op(
operations.PGBKCVOperation(
common.NameContext(transform_proto.unique_name, transform_id),
operation_specs.WorkerPartialGroupByKey(
serialized_combine_fn,
None, [factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler,
factory.get_input_windowing(transform_proto)),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PER_KEY_MERGE_ACCUMULATORS.urn,
beam_runner_api_pb2.CombinePayload)
def create_combine_per_key_merge_accumulators(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
payload, # type: beam_runner_api_pb2.CombinePayload
consumers # type: Dict[str, List[operations.Operation]]
):
return _create_combine_phase_operation(
factory, transform_id, transform_proto, payload, consumers, 'merge')
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PER_KEY_EXTRACT_OUTPUTS.urn,
beam_runner_api_pb2.CombinePayload)
def create_combine_per_key_extract_outputs(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
payload, # type: beam_runner_api_pb2.CombinePayload
consumers # type: Dict[str, List[operations.Operation]]
):
return _create_combine_phase_operation(
factory, transform_id, transform_proto, payload, consumers, 'extract')
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PER_KEY_CONVERT_TO_ACCUMULATORS.urn,
beam_runner_api_pb2.CombinePayload)
def create_combine_per_key_convert_to_accumulators(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
payload, # type: beam_runner_api_pb2.CombinePayload
consumers # type: Dict[str, List[operations.Operation]]
):
return _create_combine_phase_operation(
factory, transform_id, transform_proto, payload, consumers, 'convert')
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_GROUPED_VALUES.urn,
beam_runner_api_pb2.CombinePayload)
def create_combine_grouped_values(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
payload, # type: beam_runner_api_pb2.CombinePayload
consumers # type: Dict[str, List[operations.Operation]]
):
return _create_combine_phase_operation(
factory, transform_id, transform_proto, payload, consumers, 'all')
def _create_combine_phase_operation(
factory, transform_id, transform_proto, payload, consumers, phase):
# type: (...) -> operations.CombineOperation
serialized_combine_fn = pickler.dumps((
beam.CombineFn.from_runner_api(payload.combine_fn,
factory.context), [], {}))
return factory.augment_oldstyle_op(
operations.CombineOperation(
common.NameContext(transform_proto.unique_name, transform_id),
operation_specs.WorkerCombineFn(
serialized_combine_fn,
phase,
None, [factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(common_urns.primitives.FLATTEN.urn, None)
def create_flatten(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
payload,
consumers # type: Dict[str, List[operations.Operation]]
):
# type: (...) -> operations.FlattenOperation
return factory.augment_oldstyle_op(
operations.FlattenOperation(
common.NameContext(transform_proto.unique_name, transform_id),
operation_specs.WorkerFlatten(
None, [factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.primitives.MAP_WINDOWS.urn, beam_runner_api_pb2.FunctionSpec)
def create_map_windows(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
mapping_fn_spec, # type: beam_runner_api_pb2.FunctionSpec
consumers # type: Dict[str, List[operations.Operation]]
):
assert mapping_fn_spec.urn == python_urns.PICKLED_WINDOW_MAPPING_FN
window_mapping_fn = pickler.loads(mapping_fn_spec.payload)
class MapWindows(beam.DoFn):
def process(self, element):
key, window = element
return [(key, window_mapping_fn(window))]
return _create_simple_pardo_operation(
factory, transform_id, transform_proto, consumers, MapWindows())
@BeamTransformFactory.register_urn(
common_urns.primitives.MERGE_WINDOWS.urn, beam_runner_api_pb2.FunctionSpec)
def create_merge_windows(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
mapping_fn_spec, # type: beam_runner_api_pb2.FunctionSpec
consumers # type: Dict[str, List[operations.Operation]]
):
assert mapping_fn_spec.urn == python_urns.PICKLED_WINDOWFN
window_fn = pickler.loads(mapping_fn_spec.payload)
class MergeWindows(beam.DoFn):
def process(self, element):
nonce, windows = element
original_windows = set(windows) # type: Set[window.BoundedWindow]
merged_windows = collections.defaultdict(
set
) # type: MutableMapping[window.BoundedWindow, Set[window.BoundedWindow]]
class RecordingMergeContext(window.WindowFn.MergeContext):
def merge(
self,
to_be_merged, # type: Iterable[window.BoundedWindow]
merge_result, # type: window.BoundedWindow
):
originals = merged_windows[merge_result]
for window in to_be_merged:
if window in original_windows:
originals.add(window)
original_windows.remove(window)
else:
originals.update(merged_windows.pop(window))
window_fn.merge(RecordingMergeContext(windows))
yield nonce, (original_windows, merged_windows.items())
return _create_simple_pardo_operation(
factory, transform_id, transform_proto, consumers, MergeWindows())
@BeamTransformFactory.register_urn(common_urns.primitives.TO_STRING.urn, None)
def create_to_string_fn(
factory, # type: BeamTransformFactory
transform_id, # type: str
transform_proto, # type: beam_runner_api_pb2.PTransform
mapping_fn_spec, # type: beam_runner_api_pb2.FunctionSpec
consumers # type: Dict[str, List[operations.Operation]]
):
class ToString(beam.DoFn):
def process(self, element):
key, value = element
return [(key, str(value))]
return _create_simple_pardo_operation(
factory, transform_id, transform_proto, consumers, ToString())
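# Editor's illustrative sketch -- not part of the original Apache Beam module.
# Every factory above follows the same shape: bind a creation function to a URN
# with BeamTransformFactory.register_urn and build the operation from a DoFn via
# _create_simple_pardo_operation. Assuming this file's imports, a hypothetical
# custom primitive (the URN and DoFn below are made up) could be wired the same way.
@BeamTransformFactory.register_urn('example:custom:reverse_value:v1', None)
def create_reverse_value_fn(
    factory,  # type: BeamTransformFactory
    transform_id,  # type: str
    transform_proto,  # type: beam_runner_api_pb2.PTransform
    unused_parameter,
    consumers  # type: Dict[str, List[operations.Operation]]
):
  class ReverseValue(beam.DoFn):
    def process(self, element):
      key, value = element
      # emit the value reversed as a string, keeping the key untouched
      return [(key, str(value)[::-1])]
  return _create_simple_pardo_operation(
      factory, transform_id, transform_proto, consumers, ReverseValue())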
|
apache-2.0
|
cervinko/calibre-web
|
vendor/requests/packages/chardet/mbcssm.py
|
982
|
19608
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
BIG5_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
Big5CharLenTable = (0, 1, 1, 2, 0)
Big5SMModel = {'classTable': BIG5_cls,
'classFactor': 5,
'stateTable': BIG5_st,
'charLenTable': Big5CharLenTable,
'name': 'Big5'}
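# Editor's illustrative helper -- not part of the original chardet module and
# never called here. It shows how a model such as Big5SMModel above is meant to
# be consumed: classTable maps a raw byte to a character class, and
# stateTable[state * classFactor + byte_class] gives the next machine state
# (eStart / eError / eItsMe or an intermediate state). In chardet this stepping
# is performed by its CodingStateMachine class; the function below only mimics
# that indexing for a single byte.
def _demo_next_state(model, current_state, byte_value):
    byte_class = model['classTable'][byte_value]
    return model['stateTable'][current_state * model['classFactor'] + byte_class]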
# CP949
CP949_cls = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_st = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
CP949SMModel = {'classTable': CP949_cls,
'classFactor': 10,
'stateTable': CP949_st,
'charLenTable': CP949CharLenTable,
'name': 'CP949'}
# EUC-JP
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
EUCJPSMModel = {'classTable': EUCJP_cls,
'classFactor': 6,
'stateTable': EUCJP_st,
'charLenTable': EUCJPCharLenTable,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
EUCKRCharLenTable = (0, 1, 2, 0)
EUCKRSMModel = {'classTable': EUCKR_cls,
'classFactor': 4,
'stateTable': EUCKR_st,
'charLenTable': EUCKRCharLenTable,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
EUCTWSMModel = {'classTable': EUCTW_cls,
'classFactor': 7,
'stateTable': EUCTW_st,
'charLenTable': EUCTWCharLenTable,
'name': 'x-euc-tw'}
# GB2312
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
GB2312SMModel = {'classTable': GB2312_cls,
'classFactor': 7,
'stateTable': GB2312_st,
'charLenTable': GB2312CharLenTable,
'name': 'GB2312'}
# Shift_JIS
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,3,3,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in Shift_JIS encoding, but some pages do
#contain such bytes. We need to be more forgiving of such errors.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
4,4,4,4,4,4,4,4, # f0 - f7
4,4,4,4,4,0,0,0 # f8 - ff
)
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
SJISSMModel = {'classTable': SJIS_cls,
'classFactor': 6,
'stateTable': SJIS_st,
'charLenTable': SJISCharLenTable,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
UCS2BESMModel = {'classTable': UCS2BE_cls,
'classFactor': 6,
'stateTable': UCS2BE_st,
'charLenTable': UCS2BECharLenTable,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
UCS2LESMModel = {'classTable': UCS2LE_cls,
'classFactor': 6,
'stateTable': UCS2LE_st,
'charLenTable': UCS2LECharLenTable,
'name': 'UTF-16LE'}
# UTF-8
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8SMModel = {'classTable': UTF8_cls,
'classFactor': 16,
'stateTable': UTF8_st,
'charLenTable': UTF8CharLenTable,
'name': 'UTF-8'}
# flake8: noqa
|
gpl-3.0
|
new-xiaji/peewee
|
playhouse/tests/test_read_slave.py
|
19
|
4913
|
from peewee import *
from peewee import Using
from playhouse.read_slave import ReadSlaveModel
from playhouse.tests.base import database_initializer
from playhouse.tests.base import ModelTestCase
queries = []
def reset():
global queries
queries = []
class QueryLogDatabase(SqliteDatabase):
name = ''
def execute_sql(self, query, *args, **kwargs):
queries.append((self.name, query))
return super(QueryLogDatabase, self).execute_sql(
query, *args, **kwargs)
class Master(QueryLogDatabase):
name = 'master'
class Slave1(QueryLogDatabase):
name = 'slave1'
class Slave2(QueryLogDatabase):
name = 'slave2'
master = database_initializer.get_database('sqlite', db_class=Master)
slave1 = database_initializer.get_database('sqlite', db_class=Slave1)
slave2 = database_initializer.get_database('sqlite', db_class=Slave2)
# Models to use for testing read slaves.
class BaseModel(ReadSlaveModel):
class Meta:
database = master
read_slaves = [slave1, slave2]
class User(BaseModel):
username = CharField()
class Thing(BaseModel):
name = CharField()
class Meta:
read_slaves = [slave2]
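# Editor's illustrative sketch -- not part of the original test module and never
# invoked by it. Given the configuration above, writes issued through a
# ReadSlaveModel subclass go to `master`, while read queries round-robin over the
# declared read_slaves (behaviour exercised by TestMasterSlave below).
def _read_slave_usage_sketch():
    User.create(username='demo')  # INSERT is routed to `master`
    User.get()  # SELECT is served by slave1 or slave2 in turn
    Thing.get()  # Thing only lists slave2, so its reads always hit slave2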
# Regular models to use for testing `Using`.
class BaseMasterOnly(Model):
class Meta:
database = master
class A(BaseMasterOnly):
data = CharField()
class B(BaseMasterOnly):
data = CharField()
class TestUsing(ModelTestCase):
requires = [A, B]
def setUp(self):
super(TestUsing, self).setUp()
reset()
def assertDatabaseVerb(self, expected):
db_and_verb = [(db, sql.split()[0]) for db, sql in queries]
self.assertEqual(db_and_verb, expected)
reset()
def test_using_context(self):
models = [A, B]
with Using(slave1, models, False):
A.create(data='a1')
B.create(data='b1')
self.assertDatabaseVerb([
('slave1', 'INSERT'),
('slave1', 'INSERT')])
with Using(slave2, models, False):
A.create(data='a2')
B.create(data='b2')
a_obj = A.select().order_by(A.id).get()
self.assertEqual(a_obj.data, 'a1')
self.assertDatabaseVerb([
('slave2', 'INSERT'),
('slave2', 'INSERT'),
('slave2', 'SELECT')])
with Using(master, models, False):
query = A.select().order_by(A.data.desc())
values = [a_obj.data for a_obj in query]
self.assertEqual(values, ['a2', 'a1'])
self.assertDatabaseVerb([('master', 'SELECT')])
def test_using_transactions(self):
with Using(slave1, [A]) as txn:
list(B.select())
A.create(data='a1')
B.create(data='b1')
self.assertDatabaseVerb([
('slave1', 'BEGIN'),
('master', 'SELECT'),
('slave1', 'INSERT'),
('master', 'INSERT')])
def fail_with_exc(data):
with Using(slave2, [A]):
A.create(data=data)
raise ValueError('xxx')
self.assertRaises(ValueError, fail_with_exc, 'a2')
self.assertDatabaseVerb([
('slave2', 'BEGIN'),
('slave2', 'INSERT')])
with Using(slave1, [A, B]):
a_objs = [a_obj.data for a_obj in A.select()]
self.assertEqual(a_objs, ['a1'])
class TestMasterSlave(ModelTestCase):
requires = [User, Thing]
def setUp(self):
super(TestMasterSlave, self).setUp()
User.create(username='peewee')
Thing.create(name='something')
reset()
def assertQueries(self, databases):
self.assertEqual([q[0] for q in queries], databases)
def test_balance_pair(self):
for i in range(6):
User.get()
self.assertQueries([
'slave1',
'slave2',
'slave1',
'slave2',
'slave1',
'slave2'])
def test_balance_single(self):
for i in range(3):
Thing.get()
self.assertQueries(['slave2', 'slave2', 'slave2'])
def test_query_types(self):
u = User.create(username='charlie')
User.select().where(User.username == 'charlie').get()
self.assertQueries(['master', 'slave1'])
User.get(User.username == 'charlie')
self.assertQueries(['master', 'slave1', 'slave2'])
u.username = 'edited'
u.save() # Update.
self.assertQueries(['master', 'slave1', 'slave2', 'master'])
u.delete_instance()
self.assertQueries(['master', 'slave1', 'slave2', 'master', 'master'])
def test_raw_queries(self):
User.raw('insert into user (username) values (?)', 'charlie').execute()
rq = list(User.raw('select * from user where username = ?', 'charlie'))
self.assertEqual(rq[0].username, 'charlie')
self.assertQueries(['master', 'slave1'])
|
mit
|
philsch/ansible
|
lib/ansible/modules/windows/win_unzip.py
|
5
|
4047
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Phil Schwartz <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_unzip
version_added: "2.0"
short_description: Unzips compressed files and archives on the Windows node
description:
- Unzips compressed files and archives.
- Supports .zip files natively.
- Supports other formats supported by the PowerShell Community Extensions (PSCX) module (basically everything 7zip supports).
- For non-Windows targets, use the M(unarchive) module instead.
requirements:
- PSCX
options:
src:
description:
- File to be unzipped (provide absolute path)
required: true
dest:
description:
- Destination of zip file (provide absolute path of directory). If it does not exist, the directory will be created.
required: true
rm:
description:
- Remove the zip file after unzipping.
required: no
choices:
- true
- false
- yes
- no
default: false
recurse:
description:
- Recursively expand zipped files within the src file.
required: no
default: false
choices:
- true
- false
- yes
- no
creates:
description:
- If this file or directory exists the specified src will not be extracted.
required: no
default: null
notes:
- For extracting any compression types other than .zip, the PowerShell Community Extensions (PSCX) module is required. This module (in conjunction with PSCX)
can recursively unzip files within the provided src zip file and also supports many other compression types. If the destination
directory does not exist, it will be created before unzipping the file. Specifying the rm parameter will force removal of the src file after extraction.
- For non-Windows targets, use the M(unarchive) module instead.
author: Phil Schwartz
'''
EXAMPLES = r'''
# This unzips a library that was downloaded with win_get_url, and removes the file after extraction
# $ ansible -i hosts -m win_unzip -a "src=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=true" all
# Playbook example
# Simple unzip
---
- name: Unzip a bz2 (BZip) file
win_unzip:
src: C:\Users\Phil\Logs.bz2
dest: C:\Users\Phil\OldLogs
creates: C:\Users\Phil\OldLogs
# This playbook example unzips a .zip file and recursively decompresses the contained .gz files and removes all unneeded compressed files after completion.
- name: Unzip ApplicationLogs.zip and decompress all GZipped log files
hosts: all
gather_facts: false
tasks:
- name: Recursively decompress GZ files in ApplicationLogs.zip
win_unzip:
src: C:\Downloads\ApplicationLogs.zip
dest: C:\Application\Logs
recurse: yes
rm: true
# Install PSCX to use for extracting a gz file
- name: Grab PSCX msi
win_get_url:
url: http://download-codeplex.sec.s-msft.com/Download/Release?ProjectName=pscx&DownloadId=923562&FileTime=130585918034470000&Build=20959
dest: C:\pscx.msi
- name: Install PSCX
win_msi:
path: C:\pscx.msi
- name: Unzip gz log
win_unzip:
src: C:\Logs\application-error-logs.gz
dest: C:\ExtractedLogs\application-error-logs
'''
|
gpl-3.0
|
tkaitchuck/nupic
|
external/common/lib/python2.6/site-packages/logilab/common/registry.py
|
2
|
41325
|
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of Logilab-common.
#
# Logilab-common is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# Logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""This module provides bases for predicates dispatching (the pattern in use
here is similar to what's refered as multi-dispatch or predicate-dispatch in the
literature, though a bit different since the idea is to select across different
implementation 'e.g. classes), not to dispatch a message to a function or
method. It contains the following classes:
* :class:`RegistryStore`, the top level object which loads implementation
objects and stores them into registries. You'll usually use it to access
registries and their contained objects;
* :class:`Registry`, the base class which contains objects semantically grouped
(for instance, sharing a same API, hence the 'implementation' name). You'll
use it to select the proper implementation according to a context. Notice you
may use registries on their own without using the store.
.. Note::
implementation objects are usually designed to be accessed through the
registry and not by direct instantiation, besides using them as base classes.
The selection procedure is delegated to a selector, which is responsible for
scoring the object according to some context. At the end of the selection, if an
implementation has been found, an instance of this class is returned. A selector
is built from one or more predicates combined together using AND, OR, NOT
operators (actually `&`, `|` and `~`). You'll thus find some base classes to
build predicates:
* :class:`Predicate`, the abstract base predicate class
* :class:`AndPredicate`, :class:`OrPredicate`, :class:`NotPredicate`, which you
shouldn't have to use directly. You'll use `&`, `|` and '~' operators between
predicates directly
* :func:`objectify_predicate`
You'll eventually find one concrete predicate: :class:`yes`
.. autoclass:: RegistryStore
.. autoclass:: Registry
Predicates
----------
.. autoclass:: Predicate
.. autofunction:: objectify_predicate
.. autoclass:: yes
Debugging
---------
.. autoclass:: traced_selection
Exceptions
----------
.. autoclass:: RegistryException
.. autoclass:: RegistryNotFound
.. autoclass:: ObjectNotFound
.. autoclass:: NoSelectableObject
"""
__docformat__ = "restructuredtext en"
import sys
import types
import weakref
import traceback as tb
from os import listdir, stat
from os.path import join, isdir, exists
from logging import getLogger
from warnings import warn
from logilab.common.modutils import modpath_from_file
from logilab.common.logging_ext import set_log_methods
from logilab.common.decorators import classproperty
class RegistryException(Exception):
"""Base class for registry exception."""
class RegistryNotFound(RegistryException):
"""Raised when an unknown registry is requested.
This is usually a programming/typo error.
"""
class ObjectNotFound(RegistryException):
"""Raised when an unregistered object is requested.
This may be a programming/typo or a misconfiguration error.
"""
class NoSelectableObject(RegistryException):
"""Raised when no object is selectable for a given context."""
def __init__(self, args, kwargs, objects):
self.args = args
self.kwargs = kwargs
self.objects = objects
def __str__(self):
return ('args: %s, kwargs: %s\ncandidates: %s'
% (self.args, self.kwargs.keys(), self.objects))
def _modname_from_path(path, extrapath=None):
modpath = modpath_from_file(path, extrapath)
# omit '__init__' from the package's name to avoid loading that module
# once for each name when it is imported by some other object
# module. This assumes imports in modules are done as::
#
# from package import something
#
# not::
#
# from package.__init__ import something
#
# which seems quite correct.
if modpath[-1] == '__init__':
modpath.pop()
return '.'.join(modpath)
def _toload_info(path, extrapath, _toload=None):
"""Return a dictionary of <modname>: <modpath> and an ordered list of
(file, module name) to load
"""
if _toload is None:
assert isinstance(path, list)
_toload = {}, []
for fileordir in path:
if isdir(fileordir) and exists(join(fileordir, '__init__.py')):
subfiles = [join(fileordir, fname) for fname in listdir(fileordir)]
_toload_info(subfiles, extrapath, _toload)
elif fileordir[-3:] == '.py':
modname = _modname_from_path(fileordir, extrapath)
_toload[0][modname] = fileordir
_toload[1].append((fileordir, modname))
return _toload
class RegistrableObject(object):
"""This is the base class for registrable objects which are selected
according to a context.
:attr:`__registry__`
name of the registry for this object (string like 'views',
'templates'...). You may want to define `__registries__` directly if your
object should be registered in several registries.
:attr:`__regid__`
object's identifier in the registry (string like 'main',
'primary', 'folder_box')
:attr:`__select__`
class'selector
Moreover, the `__abstract__` attribute may be set to True to indicate that a
class is abstract and should not be registered.
You don't have to inherit from this class to put it in a registry (having
`__regid__` and `__select__` is enough), though this is needed for classes
that should be automatically registered.
"""
__registry__ = None
__regid__ = None
__select__ = None
__abstract__ = True # see doc snippets below (in Registry class)
@classproperty
def __registries__(cls):
if cls.__registry__ is None:
return ()
return (cls.__registry__,)
class RegistrableInstance(RegistrableObject):
"""Inherit this class if you want instances of the classes to be
automatically registered.
"""
def __new__(cls, *args, **kwargs):
"""Add a __module__ attribute telling the module where the instance was
created, for automatic registration.
"""
obj = super(RegistrableInstance, cls).__new__(cls)
# XXX subclass must no override __new__
filepath = tb.extract_stack(limit=2)[0][0]
obj.__module__ = _modname_from_path(filepath)
return obj
class Registry(dict):
"""The registry store a set of implementations associated to identifier:
* to each identifier are associated a list of implementations
* to select an implementation of a given identifier, you should use one of the
:meth:`select` or :meth:`select_or_none` method
* to select a list of implementations for a context, you should use the
:meth:`possible_objects` method
* dictionary like access to an identifier will return the bare list of
implementations for this identifier.
To be usable in a registry, the only requirement is to have a `__select__`
attribute.
At the end of the registration process, the :meth:`__registered__`
method is called on each registered object that defines it, with the
registry in which it's registered passed as argument.
Registration methods:
.. automethod: register
.. automethod: unregister
Selection methods:
.. automethod: select
.. automethod: select_or_none
.. automethod: possible_objects
.. automethod: object_by_id
"""
def __init__(self, debugmode):
super(Registry, self).__init__()
self.debugmode = debugmode
def __getitem__(self, name):
"""return the registry (list of implementation objects) associated to
this name
"""
try:
return super(Registry, self).__getitem__(name)
except KeyError:
raise ObjectNotFound(name), None, sys.exc_info()[-1]
@classmethod
def objid(cls, obj):
"""returns a unique identifier for an object stored in the registry"""
return '%s.%s' % (obj.__module__, cls.objname(obj))
@classmethod
def objname(cls, obj):
"""returns a readable name for an object stored in the registry"""
return getattr(obj, '__name__', id(obj))
def initialization_completed(self):
"""call method __registered__() on registered objects when the callback
is defined"""
for objects in self.itervalues():
for objectcls in objects:
registered = getattr(objectcls, '__registered__', None)
if registered:
registered(self)
if self.debugmode:
wrap_predicates(_lltrace)
def register(self, obj, oid=None, clear=False):
"""base method to add an object in the registry"""
assert not '__abstract__' in obj.__dict__, obj
assert obj.__select__, obj
oid = oid or obj.__regid__
assert oid, ('no explicit name supplied to register object %s, '
'which has no __regid__ set' % obj)
if clear:
objects = self[oid] = []
else:
objects = self.setdefault(oid, [])
assert not obj in objects, 'object %s is already registered' % obj
objects.append(obj)
def register_and_replace(self, obj, replaced):
"""remove <replaced> and register <obj>"""
# XXXFIXME this is a duplication of unregister()
# remove register_and_replace in favor of unregister + register
# or simplify by calling unregister then register here
if not isinstance(replaced, basestring):
replaced = self.objid(replaced)
# prevent from misspelling
assert obj is not replaced, 'replacing an object by itself: %s' % obj
registered_objs = self.get(obj.__regid__, ())
for index, registered in enumerate(registered_objs):
if self.objid(registered) == replaced:
del registered_objs[index]
break
else:
self.warning('trying to replace %s that is not registered with %s',
replaced, obj)
self.register(obj)
def unregister(self, obj):
"""remove object <obj> from this registry"""
objid = self.objid(obj)
oid = obj.__regid__
for registered in self.get(oid, ()):
# use self.objid() to compare objects because vreg will probably
# have its own version of the object, loaded through execfile
if self.objid(registered) == objid:
self[oid].remove(registered)
break
else:
self.warning('can\'t remove %s, no id %s in the registry',
objid, oid)
def all_objects(self):
"""return a list containing all objects in this registry.
"""
result = []
for objs in self.values():
result += objs
return result
# dynamic selection methods ################################################
def object_by_id(self, oid, *args, **kwargs):
"""return object with the `oid` identifier. Only one object is expected
to be found.
raise :exc:`ObjectNotFound` if no object with id <oid> is in <registry>
raise :exc:`AssertionError` if there is more than one object there
"""
objects = self[oid]
assert len(objects) == 1, objects
return objects[0](*args, **kwargs)
def select(self, __oid, *args, **kwargs):
"""return the most specific object among those with the given oid
according to the given context.
raise :exc:`ObjectNotFound` if no object with id <oid> is in <registry>
raise :exc:`NoSelectableObject` if no object applies
"""
obj = self._select_best(self[__oid], *args, **kwargs)
if obj is None:
raise NoSelectableObject(args, kwargs, self[__oid] )
return obj
def select_or_none(self, __oid, *args, **kwargs):
"""return the most specific object among those with the given oid
according to the given context, or None if no object applies.
"""
try:
return self.select(__oid, *args, **kwargs)
except (NoSelectableObject, ObjectNotFound):
return None
def possible_objects(self, *args, **kwargs):
"""return an iterator on possible objects in this registry for the given
context
"""
for objects in self.itervalues():
obj = self._select_best(objects, *args, **kwargs)
if obj is None:
continue
yield obj
def _select_best(self, objects, *args, **kwargs):
"""return an instance of the most specific object according
to parameters
return None if no object applies (don't raise `NoSelectableObject` since
that is costly when searching objects using `possible_objects`,
e.g. searching for hooks).
"""
score, winners = 0, None
for obj in objects:
objectscore = obj.__select__(obj, *args, **kwargs)
if objectscore > score:
score, winners = objectscore, [obj]
elif objectscore > 0 and objectscore == score:
winners.append(obj)
if winners is None:
return None
if len(winners) > 1:
# log in production environment / test, error while debugging
msg = 'select ambiguity: %s\n(args: %s, kwargs: %s)'
if self.debugmode:
# raise bare exception in debug mode
raise Exception(msg % (winners, args, kwargs.keys()))
self.error(msg, winners, args, kwargs.keys())
# return the result of calling the object
return self.selected(winners[0], args, kwargs)
def selected(self, winner, args, kwargs):
"""override here if for instance you don't want "instanciation"
"""
return winner(*args, **kwargs)
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
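# Editor's illustrative sketch -- not part of logilab-common and never called by
# this module. It exercises the Registry API documented above with a hand-rolled
# predicate (any callable returning a positive score) instead of the `yes`
# predicate class defined elsewhere in this package.
def _registry_usage_sketch():
    registry = Registry(debugmode=False)
    class Greeter(object):
        __regid__ = 'greeter'
        # the selector scores the context; a constant 1 means "always applies"
        __select__ = staticmethod(lambda cls, *args, **kwargs: 1)
        def greet(self):
            return 'hello'
    registry.register(Greeter)
    # select() picks the highest-scoring implementation and instantiates it
    return registry.select('greeter').greet()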
def obj_registries(cls, registryname=None):
"""return a tuple of registry names (see __registries__)"""
if registryname:
return (registryname,)
return cls.__registries__
class RegistryStore(dict):
"""This class is responsible for loading objects and storing them
in their registry which is created on the fly as needed.
It handles dynamic registration of objects and provides a
convenient api to access them. To be recognized as an object that
should be stored into one of the store's registry
(:class:`Registry`), an object must provide the following
attributes, used to control how they interact with the registry:
:attr:`__registries__`
list of registry names (string like 'views', 'templates'...) into which
the object should be registered
:attr:`__regid__`
object identifier in the registry (string like 'main',
'primary', 'folder_box')
:attr:`__select__`
the object predicate selectors
Moreover, the :attr:`__abstract__` attribute may be set to `True`
to indicate that an object is abstract and should not be registered
(inherited values of this attribute are not considered).
.. Note::
When using the store to load objects dynamically, you *always* have
to use **super()** to get the methods and attributes of the
superclasses, and not use the class identifier. If not, you'll get into
trouble at reload time.
For example, instead of writing::
class Thing(Parent):
__regid__ = 'athing'
__select__ = yes()
def f(self, arg1):
Parent.f(self, arg1)
You must write::
class Thing(Parent):
__regid__ = 'athing'
__select__ = yes()
def f(self, arg1):
super(Thing, self).f(arg1)
Controlling object registration
-------------------------------
Dynamic loading is triggered by calling the
:meth:`register_objects` method, given a list of directories to
inspect for python modules.
.. automethod: register_objects
For each module, by default, all compatible objects are registered
automatically. However if some objects come as replacement of
other objects, or have to be included only if some condition is
met, you'll have to define a `registration_callback(vreg)`
function in the module and explicitly register **all objects** in
this module, using the api defined below.
.. automethod:: RegistryStore.register_all
.. automethod:: RegistryStore.register_and_replace
.. automethod:: RegistryStore.register
.. automethod:: RegistryStore.unregister
.. Note::
Once the function `registration_callback(vreg)` is implemented in a
module, all the objects from this module have to be explicitly
registered as it disables the automatic object registration.
Examples:
.. sourcecode:: python
def registration_callback(store):
# register everything in the module except BabarClass
store.register_all(globals().values(), __name__, (BabarClass,))
# conditionally register BabarClass
if 'babar_relation' in store.schema:
store.register(BabarClass)
In this example, we register all application object classes defined in the module
except `BabarClass`. This class is then registered only if the 'babar_relation'
relation type is defined in the instance schema.
.. sourcecode:: python
def registration_callback(store):
store.register(Elephant)
# replace Babar by Celeste
store.register_and_replace(Celeste, Babar)
In this example, we explicitly register classes one by one:
* the `Elephant` class
* the `Celeste` to replace `Babar`
If at some point we register a new appobject class in this module, it won't be
registered at all without modification to the `registration_callback`
implementation. The first example will register it though, thanks to the call
to the `register_all` method.
Controlling registry instantiation
----------------------------------
The `REGISTRY_FACTORY` class dictionary makes it possible to specify which class
should be instantiated for a given registry name. The class associated with the
`None` key is used when there is no specific class for a name.
"""
def __init__(self, debugmode=False):
super(RegistryStore, self).__init__()
self.debugmode = debugmode
def reset(self):
"""clear all registries managed by this store"""
# don't use self.clear, we want to keep existing subdictionaries
for subdict in self.itervalues():
subdict.clear()
self._lastmodifs = {}
def __getitem__(self, name):
"""return the registry (dictionary of class objects) associated to
this name
"""
try:
return super(RegistryStore, self).__getitem__(name)
except KeyError:
raise RegistryNotFound(name), None, sys.exc_info()[-1]
# methods for explicit (un)registration ###################################
# default class, when no specific class set
REGISTRY_FACTORY = {None: Registry}
def registry_class(self, regid):
"""return existing registry named regid or use factory to create one and
return it"""
try:
return self.REGISTRY_FACTORY[regid]
except KeyError:
return self.REGISTRY_FACTORY[None]
def setdefault(self, regid):
try:
return self[regid]
except RegistryNotFound:
self[regid] = self.registry_class(regid)(self.debugmode)
return self[regid]
def register_all(self, objects, modname, butclasses=()):
"""register registrable objects into `objects`.
Registrable objects are properly configured subclasses of
:class:`RegistrableObject`. Objects which are not defined in the module
`modname` or which are in `butclasses` won't be registered.
Typical usage is:
.. sourcecode:: python
store.register_all(globals().values(), __name__, (ClassIWantToRegisterExplicitly,))
So you get partially automatic registration, keeping manual registration
for some object (to use
:meth:`~logilab.common.registry.RegistryStore.register_and_replace` for
instance).
"""
assert isinstance(modname, basestring), \
'modname expected to be a module name (ie string), got %r' % modname
for obj in objects:
if self.is_registrable(obj) and obj.__module__ == modname and not obj in butclasses:
if isinstance(obj, type):
self._load_ancestors_then_object(modname, obj, butclasses)
else:
self.register(obj)
def register(self, obj, registryname=None, oid=None, clear=False):
"""register `obj` implementation into `registryname` or
`obj.__registries__` if not specified, with identifier `oid` or
`obj.__regid__` if not specified.
If `clear` is true, all objects with the same identifier will be
unregistered first.
"""
assert not obj.__dict__.get('__abstract__'), obj
for registryname in obj_registries(obj, registryname):
registry = self.setdefault(registryname)
registry.register(obj, oid=oid, clear=clear)
self.debug("register %s in %s['%s']",
registry.objname(obj), registryname, oid or obj.__regid__)
self._loadedmods.setdefault(obj.__module__, {})[registry.objid(obj)] = obj
def unregister(self, obj, registryname=None):
"""unregister `obj` object from the registry `registryname` or
`obj.__registries__` if not specified.
"""
for registryname in obj_registries(obj, registryname):
registry = self[registryname]
registry.unregister(obj)
self.debug("unregister %s from %s['%s']",
registry.objname(obj), registryname, obj.__regid__)
def register_and_replace(self, obj, replaced, registryname=None):
"""register `obj` object into `registryname` or
`obj.__registries__` if not specified. If found, the `replaced` object
will be unregistered first (else a warning will be issued as it is
generally unexpected).
"""
for registryname in obj_registries(obj, registryname):
registry = self[registryname]
registry.register_and_replace(obj, replaced)
self.debug("register %s in %s['%s'] instead of %s",
registry.objname(obj), registryname, obj.__regid__,
registry.objname(replaced))
# initialization methods ###################################################
def init_registration(self, path, extrapath=None):
"""reset registry and walk down path to return list of (path, name)
file modules to be loaded"""
# XXX make this private by renaming it to _init_registration ?
self.reset()
# compute list of all modules that have to be loaded
self._toloadmods, filemods = _toload_info(path, extrapath)
# XXX is _loadedmods still necessary ? It seems like it's useful
# to avoid loading same module twice, especially with the
# _load_ancestors_then_object logic but this needs to be checked
self._loadedmods = {}
return filemods
def register_objects(self, path, extrapath=None):
"""register all objects found walking down <path>"""
# load views from each directory in the instance's path
# XXX inline init_registration ?
filemods = self.init_registration(path, extrapath)
for filepath, modname in filemods:
self.load_file(filepath, modname)
self.initialization_completed()
def initialization_completed(self):
"""call initialization_completed() on all known registries"""
for reg in self.itervalues():
reg.initialization_completed()
def _mdate(self, filepath):
""" return the modification date of a file path """
try:
return stat(filepath)[-2]
except OSError:
# this typically happens on emacs backup files (.#foo.py)
self.warning('Unable to load %s. It is likely to be a backup file',
filepath)
return None
def is_reload_needed(self, path):
"""return True if something module changed and the registry should be
reloaded
"""
lastmodifs = self._lastmodifs
for fileordir in path:
if isdir(fileordir) and exists(join(fileordir, '__init__.py')):
if self.is_reload_needed([join(fileordir, fname)
for fname in listdir(fileordir)]):
return True
elif fileordir[-3:] == '.py':
mdate = self._mdate(fileordir)
if mdate is None:
continue # backup file, see _mdate implementation
elif "flymake" in fileordir:
# flymake + pylint in use, don't consider these; they would corrupt the registry
continue
if fileordir not in lastmodifs or lastmodifs[fileordir] < mdate:
self.info('File %s changed since last visit', fileordir)
return True
return False
def load_file(self, filepath, modname):
""" load registrable objects (if any) from a python file """
from logilab.common.modutils import load_module_from_name
if modname in self._loadedmods:
return
self._loadedmods[modname] = {}
mdate = self._mdate(filepath)
if mdate is None:
return # backup file, see _mdate implementation
elif "flymake" in filepath:
# flymake + pylint in use, don't consider these; they would corrupt the registry
return
# set update time before module loading, else we get some reloading
# weirdness in case of syntax error or other error while importing the
# module
self._lastmodifs[filepath] = mdate
# load the module
module = load_module_from_name(modname)
self.load_module(module)
def load_module(self, module):
"""Automatically handle module objects registration.
Instances are registered as soon as they are hashable and have the
following attributes:
* __regid__ (a string)
* __select__ (a callable)
* __registries__ (a tuple/list of string)
For classes this is a bit more complicated:
- first ensure parent classes are already registered
- classes with __abstract__ == True in their local dictionary are skipped
- an object class needs to have its registries and identifier properly set to a
non-empty string to be registered.
"""
self.info('loading %s from %s', module.__name__, module.__file__)
if hasattr(module, 'registration_callback'):
module.registration_callback(self)
else:
self.register_all(vars(module).itervalues(), module.__name__)
def _load_ancestors_then_object(self, modname, objectcls, butclasses=()):
"""handle class registration according to rules defined in
:meth:`load_module`
"""
# backward compat, we used to allow anything other than classes
if not isinstance(objectcls, type):
if self.is_registrable(objectcls) and objectcls.__module__ == modname:
self.register(objectcls)
return
# imported classes
objmodname = objectcls.__module__
if objmodname != modname:
# The module of the object is not the same as the currently
# worked on module, or this is actually an instance, which
# has no module at all
if objmodname in self._toloadmods:
# if this is still scheduled for loading, let's proceed immediately,
# but using the object module
self.load_file(self._toloadmods[objmodname], objmodname)
return
# ensure object hasn't been already processed
clsid = '%s.%s' % (modname, objectcls.__name__)
if clsid in self._loadedmods[modname]:
return
self._loadedmods[modname][clsid] = objectcls
# ensure ancestors are registered
for parent in objectcls.__bases__:
self._load_ancestors_then_object(modname, parent, butclasses)
# ensure object is registrable
if objectcls in butclasses or not self.is_registrable(objectcls):
return
# backward compat
reg = self.setdefault(obj_registries(objectcls)[0])
if reg.objname(objectcls)[0] == '_':
warn("[lgc 0.59] object whose name start with '_' won't be "
"skipped anymore at some point, use __abstract__ = True "
"instead (%s)" % objectcls, DeprecationWarning)
return
# register, finally
self.register(objectcls)
@classmethod
def is_registrable(cls, obj):
"""ensure `obj` should be registered
as arbitrary stuff may be registered, do a lot of check and warn about
weird cases (think to dumb proxy objects)
"""
if isinstance(obj, type):
if not issubclass(obj, RegistrableObject):
# ducktyping backward compat
if not (getattr(obj, '__registries__', None)
and getattr(obj, '__regid__', None)
and getattr(obj, '__select__', None)):
return False
elif issubclass(obj, RegistrableInstance):
return False
elif not isinstance(obj, RegistrableInstance):
return False
if not obj.__regid__:
return False # no regid
registries = obj.__registries__
if not registries:
return False # no registries
selector = obj.__select__
if not selector:
return False # no selector
if obj.__dict__.get('__abstract__', False):
return False
# then detect potential problems that should be warned
if not isinstance(registries, (tuple, list)):
cls.warning('%s has __registries__ which is not a list or tuple', obj)
return False
if not callable(selector):
            cls.warning('%s has a non-callable __select__', obj)
return False
return True
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
# init logging
set_log_methods(RegistryStore, getLogger('registry.store'))
set_log_methods(Registry, getLogger('registry'))
# helpers for debugging selectors
TRACED_OIDS = None
def _trace_selector(cls, selector, args, ret):
vobj = args[0]
if TRACED_OIDS == 'all' or vobj.__regid__ in TRACED_OIDS:
print '%s -> %s for %s(%s)' % (cls, ret, vobj, vobj.__regid__)
def _lltrace(selector):
"""use this decorator on your predicates so they become traceable with
:class:`traced_selection`
"""
def traced(cls, *args, **kwargs):
ret = selector(cls, *args, **kwargs)
if TRACED_OIDS is not None:
_trace_selector(cls, selector, args, ret)
return ret
traced.__name__ = selector.__name__
traced.__doc__ = selector.__doc__
return traced
class traced_selection(object): # pylint: disable=C0103
"""
Typical usage is :
.. sourcecode:: python
>>> from logilab.common.registry import traced_selection
>>> with traced_selection():
... # some code in which you want to debug selectors
... # for all objects
Don't forget the 'from __future__ import with_statement' at the module top-level
if you're using python prior to 2.6.
This will yield lines like this in the logs::
selector one_line_rset returned 0 for <class 'elephant.Babar'>
    You can also give to :class:`traced_selection` the identifiers of objects on
    which you want to debug selection ('regid1' and 'regid2' in the example below).
.. sourcecode:: python
>>> with traced_selection( ('regid1', 'regid2') ):
... # some code in which you want to debug selectors
... # for objects with __regid__ 'regid1' and 'regid2'
A potentially useful point to set up such a tracing function is
the `logilab.common.registry.Registry.select` method body.
"""
def __init__(self, traced='all'):
self.traced = traced
def __enter__(self):
global TRACED_OIDS
TRACED_OIDS = self.traced
def __exit__(self, exctype, exc, traceback):
global TRACED_OIDS
TRACED_OIDS = None
return traceback is None
# selector base classes and operations ########################################
def objectify_predicate(selector_func):
"""Most of the time, a simple score function is enough to build a selector.
    The :func:`objectify_predicate` decorator turns it into a proper selector
class::
@objectify_predicate
def one(cls, req, rset=None, **kwargs):
return 1
class MyView(View):
__select__ = View.__select__ & one()
"""
return type(selector_func.__name__, (Predicate,),
{'__doc__': selector_func.__doc__,
'__call__': lambda self, *a, **kw: selector_func(*a, **kw)})
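# Illustrative sketch (not part of the original module): the class built by
# `objectify_predicate` is a regular Predicate subclass, so its instances are
# callable and composable; `SomeView` and `req` below are hypothetical names.
#
#     one_pred = one()                # using the `one` example from the docstring
#     one_pred(SomeView, req)         # delegates to the decorated function -> 1
#     combined = one_pred & yes()     # an AndPredicate (see the classes below)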
_PREDICATES = {}
def wrap_predicates(decorator):
for predicate in _PREDICATES.itervalues():
        if '_decorators' not in predicate.__dict__:
predicate._decorators = set()
if decorator in predicate._decorators:
continue
predicate._decorators.add(decorator)
predicate.__call__ = decorator(predicate.__call__)
class PredicateMetaClass(type):
def __new__(cls, *args, **kwargs):
        # use __new__ so subclasses don't have to call Predicate.__init__
inst = type.__new__(cls, *args, **kwargs)
proxy = weakref.proxy(inst, lambda p: _PREDICATES.pop(id(p)))
_PREDICATES[id(proxy)] = proxy
return inst
class Predicate(object):
"""base class for selector classes providing implementation
for operators ``&``, ``|`` and ``~``
This class is only here to give access to binary operators, the selector
logic itself should be implemented in the :meth:`__call__` method. Notice it
should usually accept any arbitrary arguments (the context), though that may
vary depending on your usage of the registry.
    A selector is called to help choose the correct object for a
    particular context by returning a score (`int`) telling how well
    the implementation given as first argument fits the given context.
    A score of 0 means that the class doesn't apply.
"""
__metaclass__ = PredicateMetaClass
@property
def func_name(self):
# backward compatibility
return self.__class__.__name__
def search_selector(self, selector):
"""search for the given selector, selector instance or tuple of
selectors in the selectors tree. Return None if not found.
"""
if self is selector:
return self
if (isinstance(selector, type) or isinstance(selector, tuple)) and \
isinstance(self, selector):
return self
return None
def __str__(self):
return self.__class__.__name__
def __and__(self, other):
return AndPredicate(self, other)
def __rand__(self, other):
return AndPredicate(other, self)
def __iand__(self, other):
return AndPredicate(self, other)
def __or__(self, other):
return OrPredicate(self, other)
def __ror__(self, other):
return OrPredicate(other, self)
def __ior__(self, other):
return OrPredicate(self, other)
def __invert__(self):
return NotPredicate(self)
# XXX (function | function) or (function & function) not managed yet
def __call__(self, cls, *args, **kwargs):
        raise NotImplementedError("selector %s must implement its logic "
                                  "in its __call__ method" % self.__class__)
def __repr__(self):
return u'<Predicate %s at %x>' % (self.__class__.__name__, id(self))
class MultiPredicate(Predicate):
"""base class for compound selector classes"""
def __init__(self, *selectors):
self.selectors = self.merge_selectors(selectors)
def __str__(self):
return '%s(%s)' % (self.__class__.__name__,
','.join(str(s) for s in self.selectors))
@classmethod
def merge_selectors(cls, selectors):
"""deal with selector instanciation when necessary and merge
multi-selectors if possible:
AndPredicate(AndPredicate(sel1, sel2), AndPredicate(sel3, sel4))
==> AndPredicate(sel1, sel2, sel3, sel4)
"""
merged_selectors = []
for selector in selectors:
# XXX do we really want magic-transformations below?
# if so, wanna warn about them?
if isinstance(selector, types.FunctionType):
selector = objectify_predicate(selector)()
if isinstance(selector, type) and issubclass(selector, Predicate):
selector = selector()
assert isinstance(selector, Predicate), selector
if isinstance(selector, cls):
merged_selectors += selector.selectors
else:
merged_selectors.append(selector)
return merged_selectors
def search_selector(self, selector):
"""search for the given selector or selector instance (or tuple of
selectors) in the selectors tree. Return None if not found
"""
for childselector in self.selectors:
if childselector is selector:
return childselector
found = childselector.search_selector(selector)
if found is not None:
return found
# if not found in children, maybe we are looking for self?
return super(MultiPredicate, self).search_selector(selector)
class AndPredicate(MultiPredicate):
"""and-chained selectors"""
def __call__(self, cls, *args, **kwargs):
score = 0
for selector in self.selectors:
partscore = selector(cls, *args, **kwargs)
if not partscore:
return 0
score += partscore
return score
class OrPredicate(MultiPredicate):
"""or-chained selectors"""
def __call__(self, cls, *args, **kwargs):
for selector in self.selectors:
partscore = selector(cls, *args, **kwargs)
if partscore:
return partscore
return 0
class NotPredicate(Predicate):
"""negation selector"""
def __init__(self, selector):
self.selector = selector
def __call__(self, cls, *args, **kwargs):
score = self.selector(cls, *args, **kwargs)
return int(not score)
def __str__(self):
return 'NOT(%s)' % self.selector
class yes(Predicate): # pylint: disable=C0103
"""Return the score given as parameter, with a default score of 0.5 so any
other selector take precedence.
Usually used for objects which can be selected whatever the context, or
also sometimes to add arbitrary points to a score.
Take care, `yes(0)` could be named 'no'...
"""
def __init__(self, score=0.5):
self.score = score
def __call__(self, *args, **kwargs):
return self.score
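# Illustrative sketch (not part of the original module) of how the predicates
# defined above combine; `ctx` stands for whatever selection context you use:
#
#     always2 = yes(2)
#     never = yes(0)
#     (always2 & yes())(ctx)   # AndPredicate sums the scores -> 2.5
#     (always2 & never)(ctx)   # a zero part short-circuits the whole AND -> 0
#     (always2 | never)(ctx)   # OrPredicate returns the first non-zero score -> 2
#     (~never)(ctx)            # NotPredicate inverts a zero score -> 1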
# deprecated stuff #############################################################
from logilab.common.deprecation import deprecated
@deprecated('[lgc 0.59] use Registry.objid class method instead')
def classid(cls):
return '%s.%s' % (cls.__module__, cls.__name__)
@deprecated('[lgc 0.59] use obj_registries function instead')
def class_registries(cls, registryname):
return obj_registries(cls, registryname)
|
gpl-3.0
|
abhinavmoudgil95/root
|
documentation/primer/macros/runall.py
|
67
|
1051
|
#! /usr/bin/env python
macros = [\
"slits.C",
"write_ntuple_to_file_advanced.C",
"write_ntuple_to_file.C",
"write_to_file.C",
"ExampleMacro.C",
"ExampleMacro_GUI.C",
"makeMySelector.C",
"RunMySelector.C",
"macro1.C",
"macro2.C",
"macro3.C",
"macro4.C",
"macro5.C",
"macro6.C",
"macro7.C",
"macro8.C",
"macro9.C",
"read_from_file.C",
"read_ntuple_from_file.C",
"read_ntuple_with_chain.C",
"TGraphFit.C",
"multigraph.C"]
pymacros = [\
"TGraphFit.py",
"macro3.py"]
import os
import sys
for mName in macros:
command = "root -b -l -q %s" %mName
if mName == "slits.C": command = 'echo "2 4" | %s' %command
print "\n ******* Running %s" %mName
if 0 !=os.system(command):
print "Macro %s" %mName
sys.exit(1)
print "\n"+"-"*80+"\nAll macros ran successfully"
for mName in pymacros:
command = "echo 1 | python %s" %mName
print "\n ******* Running %s" %mName
if 0 !=os.system(command):
print "Python macro %s" %mName
sys.exit(1)
print "\n"+"-"*80+"\nAll Python macros ran successfully"
sys.exit(0)
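# Usage note (assumption, not part of the original script): run it from the
# directory that contains the macros listed above, e.g.
#     cd documentation/primer/macros && python runall.py
# A working ROOT installation providing the `root` executable is required,
# since every macro is executed through "root -b -l -q <macro>".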
|
lgpl-2.1
|
richbs/colourlens
|
colourlens/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py
|
3131
|
47315
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
# Char to FreqOrder table ,
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purpose
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
|
mit
|
joariasl/odoo
|
addons/sale_order_dates/sale_order_dates.py
|
223
|
5308
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
class sale_order_dates(osv.osv):
"""Add several date fields to Sale Orders, computed or user-entered"""
_inherit = 'sale.order'
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
"""Compute the expected date from the requested date, not the order date"""
if order and order.requested_date:
date_planned = datetime.strptime(order.requested_date, DEFAULT_SERVER_DATETIME_FORMAT)
date_planned -= timedelta(days=order.company_id.security_lead)
return date_planned.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return super(sale_order_dates, self)._get_date_planned(
cr, uid, order, line, start_date, context=context)
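# Illustrative note (not part of the original module): with a requested_date of
# '2017-06-10 00:00:00' and a company security_lead of 2 days, the method above
# would return '2017-06-08 00:00:00', i.e. the delivery is planned early enough
# to absorb the company's security delay.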
def _get_effective_date(self, cr, uid, ids, name, arg, context=None):
"""Read the shipping date from the related packings"""
# TODO: would be better if it returned the date the picking was processed?
res = {}
dates_list = []
for order in self.browse(cr, uid, ids, context=context):
dates_list = []
for pick in order.picking_ids:
dates_list.append(pick.date)
if dates_list:
res[order.id] = min(dates_list)
else:
res[order.id] = False
return res
def _get_commitment_date(self, cr, uid, ids, name, arg, context=None):
"""Compute the commitment date"""
res = {}
dates_list = []
for order in self.browse(cr, uid, ids, context=context):
dates_list = []
order_datetime = datetime.strptime(order.date_order, DEFAULT_SERVER_DATETIME_FORMAT)
for line in order.order_line:
if line.state == 'cancel':
continue
dt = order_datetime + timedelta(days=line.delay or 0.0)
dt_s = dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
dates_list.append(dt_s)
if dates_list:
res[order.id] = min(dates_list)
return res
def onchange_requested_date(self, cr, uid, ids, requested_date,
commitment_date, context=None):
"""Warn if the requested dates is sooner than the commitment date"""
if (requested_date and commitment_date and requested_date < commitment_date):
return {'warning': {
'title': _('Requested date is too soon!'),
'message': _("The date requested by the customer is "
"sooner than the commitment date. You may be "
"unable to honor the customer's request.")
}
}
return {}
_columns = {
'commitment_date': fields.function(_get_commitment_date, store=True,
type='datetime', string='Commitment Date',
help="Date by which the products are sure to be delivered. This is "
"a date that you can promise to the customer, based on the "
"Product Lead Times."),
'requested_date': fields.datetime('Requested Date',
readonly=True, states={'draft': [('readonly', False)],
'sent': [('readonly', False)]}, copy=False,
help="Date by which the customer has requested the items to be "
"delivered.\n"
"When this Order gets confirmed, the Delivery Order's "
"expected date will be computed based on this date and the "
"Company's Security Delay.\n"
"Leave this field empty if you want the Delivery Order to be "
"processed as soon as possible. In that case the expected "
"date will be computed using the default method: based on "
"the Product Lead Times and the Company's Security Delay."),
'effective_date': fields.function(_get_effective_date, type='date',
store=True, string='Effective Date',
help="Date on which the first Delivery Order was created."),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ProfessionalIT/professionalit-webiste
|
sdk/google_appengine/lib/pyasn1/pyasn1/type/namedval.py
|
200
|
1605
|
# ASN.1 named integers
from pyasn1 import error
__all__ = [ 'NamedValues' ]
class NamedValues:
def __init__(self, *namedValues):
self.nameToValIdx = {}; self.valToNameIdx = {}
self.namedValues = ()
automaticVal = 1
for namedValue in namedValues:
if isinstance(namedValue, tuple):
name, val = namedValue
else:
name = namedValue
val = automaticVal
if name in self.nameToValIdx:
raise error.PyAsn1Error('Duplicate name %s' % (name,))
self.nameToValIdx[name] = val
if val in self.valToNameIdx:
raise error.PyAsn1Error('Duplicate value %s=%s' % (name, val))
self.valToNameIdx[val] = name
self.namedValues = self.namedValues + ((name, val),)
automaticVal = automaticVal + 1
def __str__(self): return str(self.namedValues)
def getName(self, value):
if value in self.valToNameIdx:
return self.valToNameIdx[value]
def getValue(self, name):
if name in self.nameToValIdx:
return self.nameToValIdx[name]
def __getitem__(self, i): return self.namedValues[i]
def __len__(self): return len(self.namedValues)
def __add__(self, namedValues):
return self.__class__(*self.namedValues + namedValues)
def __radd__(self, namedValues):
return self.__class__(*namedValues + tuple(self))
def clone(self, *namedValues):
return self.__class__(*tuple(self) + namedValues)
# XXX clone/subtype?
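# Illustrative usage sketch (not part of the original module). NamedValues pairs
# symbolic names with integer values; a bare name receives an automatic value,
# and the automatic counter advances once per entry, tuple or not:
#
#     flags = NamedValues(('read', 1), ('write', 2), 'execute')
#     flags.getValue('write')    # -> 2
#     flags.getName(1)           # -> 'read'
#     flags.getValue('execute')  # -> 3 (third entry, auto-assigned)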
|
lgpl-3.0
|
FreakJoe/cryptolockpy
|
cryptolock/SecureDatabase.py
|
1
|
2510
|
"""Provides an interface for interaction with the encrypted sqlite database"""
from cryptolock.Database import Database
from cryptolock.security import encrypt, decrypt, ensure_key_validity
from cryptolock.exceptions import DocumentNotFoundException
from config import DB_NAME
class SecureDatabase(object):
"""Interface for interaction with the encrypted sqlite database"""
def __init__(self, db_name=DB_NAME):
"""Initializes database object that will be used to store secure data"""
self.database = Database(db_name)
def close(self):
"""Closes the session"""
return self.database.close()
def add_document(self, document, key):
"""Encrypts a document's content and passes it on to be saved in the database"""
# Ensure the document is supplied as a list or tuple of two strings: document name and document content
if (not isinstance(document, tuple) and not isinstance(document, list)) or not len(document) == 2 or not isinstance(document[0], str) or not isinstance(document[1], str):
return False
# Ensure key validity
key = ensure_key_validity(key)
document_name = document[0]
document_content = document[1]
encrypted_document_content = encrypt(document_content, key)
return self.database.add_document((document_name, encrypted_document_content))
def update_document(self, document, key):
"""Updates an existing document in the database"""
# Ensure the document is supplied as a list or tuple of two strings: document name and document content
if (not isinstance(document, tuple) and not isinstance(document, list)) or not len(document) == 2 or not isinstance(document[0], str) or not isinstance(document[1], str):
return False
# Ensure key validity
key = ensure_key_validity(key)
document_name = document[0]
document_content = document[1]
encrypted_document_content = encrypt(document_content, key)
return self.database.update_document((document_name, encrypted_document_content))
def get_document_content(self, document_name, key):
"""Fetches a document's content from the database"""
# Ensure key validity
key = ensure_key_validity(key)
document_content = self.database.get_document_content(document_name)
if not document_content:
raise DocumentNotFoundException
return decrypt(document_content, key)
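# Illustrative usage sketch (not part of the original module); it assumes a
# writable sqlite database at the configured DB_NAME and a key accepted by
# ensure_key_validity:
#
#     db = SecureDatabase()
#     db.add_document(('notes', 'secret text'), 'my passphrase')
#     db.get_document_content('notes', 'my passphrase')  # -> 'secret text'
#     db.close()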
|
mit
|
neilLasrado/frappe
|
frappe/utils/doctor.py
|
9
|
3574
|
from __future__ import unicode_literals, print_function
import frappe.utils
from collections import defaultdict
from rq import Worker, Connection
from frappe.utils.background_jobs import get_redis_conn, get_queue, get_queue_list
from frappe.utils.scheduler import is_scheduler_disabled
from six import iteritems
def get_workers():
with Connection(get_redis_conn()):
workers = Worker.all()
return workers
def purge_pending_jobs(event=None, site=None, queue=None):
"""
Purge tasks of the given event type. Passing 'all' does not purge every
event; it purges only tasks of the 'all' event type, i.e. the ones that are
enqueued every five minutes, and leaves daily, hourly and weekly tasks alone
"""
purged_task_count = 0
for queue in get_queue_list(queue):
q = get_queue(queue)
for job in q.jobs:
if (site and event):
if job.kwargs['site'] == site and job.kwargs['event'] == event:
job.delete()
purged_task_count+=1
elif site:
if job.kwargs['site'] == site:
job.delete()
purged_task_count+=1
elif event:
if job.kwargs['event'] == event:
job.delete()
purged_task_count+=1
else:
purged_task_count += q.count
q.empty()
return purged_task_count
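# Illustrative calls (not part of the original module), assuming a site named
# 'example.local' exists:
#     purge_pending_jobs(event='all', site='example.local')  # drop only that site's 'all'-type tasks
#     purge_pending_jobs()                                    # empty every queue entirely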
def get_jobs_by_queue(site=None):
jobs_per_queue = defaultdict(list)
job_count = {}
for queue in get_queue_list():
q = get_queue(queue)
for job in q.jobs:
if not site:
jobs_per_queue[queue].append(job.kwargs.get('method') or job.description)
elif job.kwargs['site'] == site:
jobs_per_queue[queue].append(job.kwargs.get('method') or job.description)
consolidated_methods = {}
for method in jobs_per_queue[queue]:
if method not in list(consolidated_methods):
consolidated_methods[method] = 1
else:
consolidated_methods[method] += 1
job_count[queue] = len(jobs_per_queue[queue])
jobs_per_queue[queue] = consolidated_methods
return jobs_per_queue, job_count
def get_pending_jobs(site=None):
jobs_per_queue = defaultdict(list)
for queue in get_queue_list():
q = get_queue(queue)
for job in q.jobs:
method_kwargs = job.kwargs['kwargs'] if job.kwargs['kwargs'] else ""
if job.kwargs['site'] == site:
jobs_per_queue[queue].append("{0} {1}".
format(job.kwargs['method'], method_kwargs))
return jobs_per_queue
def check_number_of_workers():
return len(get_workers())
def get_running_tasks():
for worker in get_workers():
return worker.get_current_job()
def doctor(site=None):
"""
Prints diagnostic information for the scheduler
"""
with frappe.init_site(site):
workers_online = check_number_of_workers()
jobs_per_queue, job_count = get_jobs_by_queue(site)
print("-----Checking scheduler status-----")
if site:
sites = [site]
else:
sites = frappe.utils.get_sites()
for s in sites:
frappe.init(s)
frappe.connect()
if is_scheduler_disabled():
print("Scheduler disabled for", s)
frappe.destroy()
# TODO improve this
print("Workers online:", workers_online)
print("-----{0} Jobs-----".format(site))
for queue in get_queue_list():
if jobs_per_queue[queue]:
print("Queue:", queue)
print("Number of Jobs: ", job_count[queue])
print("Methods:")
for method, count in iteritems(jobs_per_queue[queue]):
print("{0} : {1}".format(method, count))
print("------------")
return True
def pending_jobs(site=None):
print("-----Pending Jobs-----")
pending_jobs = get_pending_jobs(site)
for queue in get_queue_list():
if(pending_jobs[queue]):
print("-----Queue :{0}-----".format(queue))
print("\n".join(pending_jobs[queue]))
|
mit
|
epfl-lts2/pygsp
|
examples/random_walk.py
|
1
|
1806
|
r"""
Random walks
============
Probability of a random walker to be on any given vertex after a given number
of steps starting from a given distribution.
"""
# sphinx_gallery_thumbnail_number = 2
import numpy as np
from scipy import sparse
from matplotlib import pyplot as plt
import pygsp as pg
N = 7
steps = [0, 1, 2, 3]
graph = pg.graphs.Grid2d(N)
delta = np.zeros(graph.N)
delta[N//2*N + N//2] = 1
probability = sparse.diags(graph.dw**(-1)).dot(graph.W)
fig, axes = plt.subplots(1, len(steps), figsize=(12, 3))
for step, ax in zip(steps, axes):
state = (probability**step).__rmatmul__(delta) ## = delta @ probability**step
graph.plot(state, ax=ax, title=r'$\delta P^{}$'.format(step))
ax.set_axis_off()
fig.tight_layout()
###############################################################################
# Stationary distribution.
graphs = [
pg.graphs.Ring(10),
pg.graphs.Grid2d(5),
pg.graphs.Comet(8, 4),
pg.graphs.BarabasiAlbert(20, seed=42),
]
fig, axes = plt.subplots(1, len(graphs), figsize=(12, 3))
for graph, ax in zip(graphs, axes):
if not hasattr(graph, 'coords'):
graph.set_coordinates(seed=10)
P = sparse.diags(graph.dw**(-1)).dot(graph.W)
# e, u = np.linalg.eig(P.T.toarray())
# np.testing.assert_allclose(np.linalg.inv(u.T) @ np.diag(e) @ u.T,
# P.toarray(), atol=1e-10)
# np.testing.assert_allclose(np.abs(e[0]), 1)
# stationary = np.abs(u.T[0])
e, u = sparse.linalg.eigs(P.T, k=1, which='LR')
np.testing.assert_allclose(e, 1)
stationary = np.abs(u).squeeze()
assert np.all(stationary < 0.71)
colorbar = False if type(graph) is pg.graphs.Ring else True
graph.plot(stationary, colorbar=colorbar, ax=ax, title='$xP = x$')
ax.set_axis_off()
fig.tight_layout()
|
bsd-3-clause
|
a-parhom/edx-platform
|
openedx/core/djangoapps/content/course_overviews/tasks.py
|
17
|
1815
|
import logging
from celery import task
from celery_utils.persist_on_failure import LoggedPersistOnFailureTask
from django.conf import settings
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
log = logging.getLogger(__name__)
DEFAULT_ALL_COURSES = False
DEFAULT_CHUNK_SIZE = 50
DEFAULT_FORCE_UPDATE = False
def chunks(sequence, chunk_size):
return (sequence[index: index + chunk_size] for index in xrange(0, len(sequence), chunk_size))
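# Illustrative example (not part of the original module): chunks() lazily yields
# fixed-size slices, e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]].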
def _task_options(routing_key):
task_options = {}
if getattr(settings, 'HIGH_MEM_QUEUE', None):
task_options['routing_key'] = settings.HIGH_MEM_QUEUE
if routing_key:
task_options['routing_key'] = routing_key
return task_options
def enqueue_async_course_overview_update_tasks(
course_ids,
all_courses=False,
force_update=False,
chunk_size=DEFAULT_CHUNK_SIZE,
routing_key=None
):
if all_courses:
course_keys = [course.id for course in modulestore().get_course_summaries()]
else:
course_keys = [CourseKey.from_string(id) for id in course_ids]
for course_key_group in chunks(course_keys, chunk_size):
course_key_strings = [unicode(key) for key in course_key_group]
options = _task_options(routing_key)
async_course_overview_update.apply_async(
args=course_key_strings,
kwargs={'force_update': force_update},
**options
)
@task(base=LoggedPersistOnFailureTask)
def async_course_overview_update(*args, **kwargs):
course_keys = [CourseKey.from_string(arg) for arg in args]
CourseOverview.update_select_courses(course_keys, force_update=kwargs['force_update'])
|
agpl-3.0
|
dyyi/moneybook
|
venv/Lib/encodings/mbcs.py
|
860
|
1211
|
""" Python 'mbcs' Codec for Windows
Cloned by Mark Hammond ([email protected]) from ascii.py,
which was written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
# Import them explicitly to cause an ImportError
# on non-Windows systems
from codecs import mbcs_encode, mbcs_decode
# for IncrementalDecoder, IncrementalEncoder, ...
import codecs
### Codec APIs
encode = mbcs_encode
def decode(input, errors='strict'):
return mbcs_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return mbcs_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = mbcs_decode
class StreamWriter(codecs.StreamWriter):
encode = mbcs_encode
class StreamReader(codecs.StreamReader):
decode = mbcs_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mbcs',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
apache-2.0
|
jnayak1/osf.io
|
website/conferences/model.py
|
5
|
2592
|
# -*- coding: utf-8 -*-
import bson
from modularodm import fields, Q
from modularodm.exceptions import ModularOdmException
from framework.mongo import StoredObject
from website.conferences.exceptions import ConferenceError
DEFAULT_FIELD_NAMES = {
'submission1': 'poster',
'submission2': 'talk',
'submission1_plural': 'posters',
'submission2_plural': 'talks',
'meeting_title_type': 'Posters & Talks',
'add_submission': 'poster or talk',
'mail_subject': 'Presentation title',
'mail_message_body': 'Presentation abstract (if any)',
'mail_attachment': 'Your presentation file (e.g., PowerPoint, PDF, etc.)'
}
class Conference(StoredObject):
#: Determines the email address for submission and the OSF url
# Example: If endpoint is spsp2014, then submission email will be
# [email protected] or [email protected] and the OSF url will
# be osf.io/view/spsp2014
endpoint = fields.StringField(primary=True, required=True, unique=True)
#: Full name, e.g. "SPSP 2014"
name = fields.StringField(required=True)
info_url = fields.StringField(required=False, default=None)
logo_url = fields.StringField(required=False, default=None)
location = fields.StringField(required=False, default=None)
start_date = fields.DateTimeField(default=None)
end_date = fields.DateTimeField(default=None)
active = fields.BooleanField(required=True)
admins = fields.ForeignField('user', list=True, required=False, default=None)
#: Whether to make submitted projects public
public_projects = fields.BooleanField(required=False, default=True)
poster = fields.BooleanField(default=True)
talk = fields.BooleanField(default=True)
# field_names are used to customize the text on the conference page, the categories
# of submissions, and the email address to send material to.
field_names = fields.DictionaryField(default=lambda: DEFAULT_FIELD_NAMES)
# Cached number of submissions
num_submissions = fields.IntegerField(default=0)
@classmethod
def get_by_endpoint(cls, endpoint, active=True):
query = Q('endpoint', 'iexact', endpoint)
if active:
query &= Q('active', 'eq', True)
try:
return Conference.find_one(query)
except ModularOdmException:
raise ConferenceError('Endpoint {0} not found'.format(endpoint))
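# Illustrative usage (not part of the original module), assuming an active
# conference with endpoint 'spsp2014' exists:
#     conf = Conference.get_by_endpoint('spsp2014')
#     conf.name  # e.g. 'SPSP 2014'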
class MailRecord(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(bson.ObjectId()))
data = fields.DictionaryField()
records = fields.AbstractForeignField(list=True)
|
apache-2.0
|
tjanez/ansible
|
lib/ansible/plugins/callback/timer.py
|
168
|
1125
|
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from datetime import datetime
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
This callback module tells you how long your plays ran for.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'timer'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
self.start_time = datetime.now()
def days_hours_minutes_seconds(self, runtime):
minutes = (runtime.seconds // 60) % 60
r_seconds = runtime.seconds % 60
return runtime.days, runtime.seconds // 3600, minutes, r_seconds
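# Illustrative example (not part of the original plugin): a runtime of
# timedelta(days=1, hours=1, minutes=1, seconds=1) maps to (1, 1, 1, 1).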
def playbook_on_stats(self, stats):
self.v2_playbook_on_stats(stats)
def v2_playbook_on_stats(self, stats):
end_time = datetime.now()
runtime = end_time - self.start_time
self._display.display("Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(runtime)))
|
gpl-3.0
|
ayoubg/gem5-graphics
|
gem5-gpu/tests/configs/gem5-gpu-ruby.py
|
1
|
7700
|
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# Copyright (c) 2012-2015 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Joel Hestness, Jason Power
import optparse
import os
import sys
from os.path import join as joinpath
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
def getTestFilename(test_location):
file_chop_index = test_location.find('tests/')
if file_chop_index <= 0:
fatal('test_filename lacks \'tests\/\' substring')
test_filename = test_location[file_chop_index:]
test_filename = test_filename.replace('/opt/','/')
test_filename = test_filename.replace('/debug/','/')
test_filename = test_filename.replace('/fast/','/')
supported_isas = [ 'arm', 'x86' ]
isa = None
for test_isa in supported_isas:
if test_isa in test_filename:
isa = test_isa
break
if not isa:
fatal('ISA not found in test: %s' % test_filename)
file_chop_index = test_filename.find('%s/' % isa)
if file_chop_index >= len(test_filename):
fatal('test_filename lacks \'%s\/\' substring' % isa)
test_filename = test_filename[:file_chop_index]
test_filename = os.path.join(test_filename, 'test.py')
if not os.path.exists(test_filename):
fatal('Could not find test script: \'%s\'' % test_filename)
return test_filename
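# Illustrative example (hypothetical path, not from the original script): a
# test_location such as 'build/X86/tests/opt/quick/se/00.hello/x86/linux/foo'
# is reduced to 'tests/quick/se/00.hello/' and resolved to
# 'tests/quick/se/00.hello/test.py', provided that script exists on disk.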
addToPath('../configs/common')
addToPath('../configs/ruby')
addToPath('../configs/topologies')
addToPath('../../gem5-gpu/configs')
addToPath('../../gem5-gpu/configs/gpu_protocol')
import GPUConfig
import GPUMemConfig
import Options
import Ruby
import Simulation
parser = optparse.OptionParser()
GPUConfig.addGPUOptions(parser)
GPUMemConfig.addMemCtrlOptions(parser)
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
#
# Add the ruby specific and protocol specific options
#
Ruby.define_options(parser)
(options, args) = parser.parse_args()
# Use ruby
options.ruby = True
options.mem_type = "RubyMemoryControl"
options.g_depth_shader=1 #z-unit not used
if not args or len(args) != 1:
print "Error: script expects a single positional argument"
sys.exit(1)
if buildEnv['TARGET_ISA'] != "x86" and buildEnv['TARGET_ISA'] != "arm":
fatal("gem5-gpu doesn't currently work with non-ARM or non-x86 system!")
#
# Setup test benchmark to be run
#
# Get the filename of the test
test_filename = getTestFilename(args[0])
# Load the test information from the file
execfile(test_filename)
#
# CPU type configuration
#
if options.cpu_type != "timing" and options.cpu_type != "detailed":
print "Warning: gem5-gpu only works with timing and detailed CPUs. Defaulting to timing"
options.cpu_type = "timing"
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
#
# Memory space configuration
#
(cpu_mem_range, gpu_mem_range, total_mem_range) = GPUConfig.configureMemorySpaces(options)
# Hard code the cache block width to 128B for now
# TODO: Remove this if/when block size can be different than 128B
if options.cacheline_size != 128:
print "Warning: Only block size currently supported is 128B. Defaulting to 128."
options.cacheline_size = 128
#
# Instantiate system
#
system = System(cpu = [CPUClass(cpu_id = i)
for i in xrange(options.num_cpus)],
mem_mode = test_mem_mode,
mem_ranges = [cpu_mem_range],
cache_line_size = options.cacheline_size)
# Create a top-level voltage domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
# Create a CPU voltage domain
system.cpu_voltage_domain = VoltageDomain()
# Create a separate clock domain for the CPUs
system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
voltage_domain =
system.cpu_voltage_domain)
Simulation.setWorkCountOptions(system, options)
#
# Create the GPU
#
system.gpu = GPUConfig.createGPU(options, gpu_mem_range)
#
# Setup Ruby
#
system.ruby_clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
Ruby.create_system(options, False, system)
system.gpu.ruby = system.ruby
system.ruby.clk_domain = system.ruby_clk_domain
if options.split:
if options.access_backing_store:
#
# Reset Ruby's phys_mem to add the device memory range
#
system.ruby.phys_mem = SimpleMemory(range=total_mem_range,
in_addr_map=False)
#
# Connect CPU ports
#
for (i, cpu) in enumerate(system.cpu):
ruby_port = system.ruby._cpu_ports[i]
cpu.clk_domain = system.cpu_clk_domain
cpu.createThreads()
cpu.createInterruptController()
#
# Tie the cpu ports to the correct ruby system ports
#
cpu.icache_port = system.ruby._cpu_ports[i].slave
cpu.dcache_port = system.ruby._cpu_ports[i].slave
cpu.itb.walker.port = system.ruby._cpu_ports[i].slave
cpu.dtb.walker.port = system.ruby._cpu_ports[i].slave
if buildEnv['TARGET_ISA'] == "x86":
cpu.interrupts.pio = ruby_port.master
cpu.interrupts.int_master = ruby_port.slave
cpu.interrupts.int_slave = ruby_port.master
#
# Connect GPU ports
#
GPUConfig.connectGPUPorts(system.gpu, system.ruby, options)
if options.mem_type == "RubyMemoryControl":
GPUMemConfig.setMemoryControlOptions(system, options)
#
# Finalize setup and benchmark, and then run
#
root = Root(full_system = False, system = system)
command_line = []
command_line.append(binpath(options.cmd))
for option in options.options.split():
command_line.append(option)
root.system.cpu[0].workload = LiveProcess(cmd = command_line,
executable = binpath(options.cmd))
if root.system.cpu[0].checker != NULL:
root.system.cpu[0].checker.workload = root.system.cpu[0].workload
m5.disableAllListeners()
Simulation.run(options, root, system, FutureClass)
|
bsd-3-clause
|
40223239/cdb0622
|
static/Brython3.1.1-20150328-091302/Lib/linecache.py
|
785
|
3864
|
"""Cache lines from files.
This is intended to read lines from modules imported -- hence if a filename
is not found, it will look down the module search path for a file by
that name.
"""
import sys
import os
import tokenize
__all__ = ["getline", "clearcache", "checkcache"]
def getline(filename, lineno, module_globals=None):
lines = getlines(filename, module_globals)
if 1 <= lineno <= len(lines):
return lines[lineno-1]
else:
return ''
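# Illustrative examples (not part of the original module):
#     getline('/path/to/module.py', 3)  # -> third line, trailing '\n' included
#     getline('/path/to/module.py', 0)  # -> '' (line numbers are 1-based)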
# The cache
cache = {} # The cache
def clearcache():
"""Clear the cache entirely."""
global cache
cache = {}
def getlines(filename, module_globals=None):
"""Get the lines for a file from the cache.
Update the cache if it doesn't contain an entry for this file already."""
if filename in cache:
return cache[filename][2]
else:
return updatecache(filename, module_globals)
def checkcache(filename=None):
"""Discard cache entries that are out of date.
(This is not checked upon each call!)"""
if filename is None:
filenames = list(cache.keys())
else:
if filename in cache:
filenames = [filename]
else:
return
for filename in filenames:
size, mtime, lines, fullname = cache[filename]
if mtime is None:
continue # no-op for files loaded via a __loader__
try:
stat = os.stat(fullname)
except os.error:
del cache[filename]
continue
if size != stat.st_size or mtime != stat.st_mtime:
del cache[filename]
def updatecache(filename, module_globals=None):
"""Update a cache entry and return its list of lines.
If something's wrong, print a message, discard the cache entry,
and return an empty list."""
if filename in cache:
del cache[filename]
if not filename or (filename.startswith('<') and filename.endswith('>')):
return []
fullname = filename
try:
stat = os.stat(fullname)
except OSError:
basename = filename
# Try for a __loader__, if available
if module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return []
cache[filename] = (
len(data), None,
[line+'\n' for line in data.splitlines()], fullname
)
return cache[filename][2]
# Try looking through the module search path, which is only useful
# when handling a relative filename.
if os.path.isabs(filename):
return []
for dirname in sys.path:
try:
fullname = os.path.join(dirname, basename)
except (TypeError, AttributeError):
# Not sufficiently string-like to do anything useful with.
continue
try:
stat = os.stat(fullname)
break
except os.error:
pass
else:
return []
try:
with tokenize.open(fullname) as fp:
lines = fp.readlines()
except IOError:
return []
if lines and not lines[-1].endswith('\n'):
lines[-1] += '\n'
size, mtime = stat.st_size, stat.st_mtime
cache[filename] = size, mtime, lines, fullname
return lines
|
gpl-3.0
|
albertomurillo/ansible
|
lib/ansible/modules/cloud/amazon/lambda.py
|
30
|
22661
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda
short_description: Manage AWS Lambda functions
description:
- Allows for the management of Lambda functions.
version_added: '2.2'
requirements: [ boto3 ]
options:
name:
description:
- The name you want to assign to the function you are uploading. Cannot be changed.
required: true
state:
description:
- Create or delete Lambda function.
default: present
choices: [ 'present', 'absent' ]
runtime:
description:
- The runtime environment for the Lambda function you are uploading.
- Required when creating a function. Uses parameters as described in boto3 docs.
- Required when C(state=present).
- For supported list of runtimes, see U(https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html).
role:
description:
- The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
resources. You may use the bare ARN if the role belongs to the same AWS account.
- Required when C(state=present).
handler:
description:
- The function within your code that Lambda calls to begin execution.
zip_file:
description:
- A .zip file containing your deployment package
- If C(state=present) then either zip_file or s3_bucket must be present.
aliases: [ 'src' ]
s3_bucket:
description:
- Amazon S3 bucket name where the .zip file containing your deployment package is stored.
- If C(state=present) then either zip_file or s3_bucket must be present.
- C(s3_bucket) and C(s3_key) are required together.
s3_key:
description:
- The Amazon S3 object (the deployment package) key name you want to upload.
- C(s3_bucket) and C(s3_key) are required together.
s3_object_version:
description:
- The Amazon S3 object (the deployment package) version you want to upload.
description:
description:
- A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
timeout:
description:
- The function maximum execution time in seconds after which Lambda should terminate the function.
default: 3
memory_size:
description:
- The amount of memory, in MB, your Lambda function is given.
default: 128
vpc_subnet_ids:
description:
- List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run
the function in a VPC.
vpc_security_group_ids:
description:
- List of VPC security group IDs to associate with the Lambda function. Required when vpc_subnet_ids is used.
environment_variables:
description:
- A dictionary of environment variables the Lambda function is given.
aliases: [ 'environment' ]
version_added: "2.3"
dead_letter_arn:
description:
- The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
version_added: "2.3"
tags:
description:
- tag dict to apply to the function (requires botocore 1.5.40 or above).
version_added: "2.5"
author:
- 'Steyn Huizinga (@steynovich)'
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create Lambda functions
- name: looped creation
lambda:
name: '{{ item.name }}'
state: present
zip_file: '{{ item.zip_file }}'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
vpc_subnet_ids:
- subnet-123abcde
- subnet-edcba321
vpc_security_group_ids:
- sg-123abcde
- sg-edcba321
environment_variables: '{{ item.env_vars }}'
tags:
key1: 'value1'
loop:
- name: HelloWorld
zip_file: hello-code.zip
env_vars:
key1: "first"
key2: "second"
- name: ByeBye
zip_file: bye-code.zip
env_vars:
key1: "1"
key2: "2"
# To remove previously added tags pass an empty dict
- name: remove tags
lambda:
name: 'Lambda function'
state: present
zip_file: 'code.zip'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
tags: {}
# Basic Lambda function deletion
- name: Delete Lambda functions HelloWorld and ByeBye
lambda:
name: '{{ item }}'
state: absent
loop:
- HelloWorld
- ByeBye
'''
RETURN = '''
code:
description: the lambda function location returned by get_function in boto3
returned: success
type: dict
sample:
{
'location': 'a presigned S3 URL',
'repository_type': 'S3',
}
configuration:
description: the lambda function metadata returned by get_function in boto3
returned: success
type: dict
sample:
{
'code_sha256': 'SHA256 hash',
'code_size': 123,
'description': 'My function',
'environment': {
'variables': {
'key': 'value'
}
},
'function_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1',
'function_name': 'myFunction',
'handler': 'index.handler',
'last_modified': '2017-08-01T00:00:00.000+0000',
'memory_size': 128,
'role': 'arn:aws:iam::123456789012:role/lambda_basic_execution',
'runtime': 'nodejs6.10',
'timeout': 3,
'version': '1',
'vpc_config': {
'security_group_ids': [],
'subnet_ids': []
}
}
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import compare_aws_tags
import base64
import hashlib
import re
import traceback
try:
from botocore.exceptions import ClientError, BotoCoreError, ValidationError, ParamValidationError
except ImportError:
pass # protected by AnsibleAWSModule
def get_account_id(module, region=None, endpoint=None, **aws_connect_kwargs):
"""return the account id we are currently working on
get_account_id tries to find out the account that we are working
on. It's not guaranteed that this will be easy so we try in
several different ways. Giving either IAM or STS privileges to
the account should be enough to permit this.
"""
account_id = None
try:
sts_client = boto3_conn(module, conn_type='client', resource='sts',
region=region, endpoint=endpoint, **aws_connect_kwargs)
account_id = sts_client.get_caller_identity().get('Account')
except ClientError:
try:
iam_client = boto3_conn(module, conn_type='client', resource='iam',
region=region, endpoint=endpoint, **aws_connect_kwargs)
account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
except ClientError as e:
if (e.response['Error']['Code'] == 'AccessDenied'):
except_msg = to_native(e.message)
account_id = re.search(r"arn:aws:iam::([0-9]{12,32}):\w+/", except_msg).group(1)
if account_id is None:
module.fail_json_aws(e, msg="getting account information")
except Exception as e:
module.fail_json_aws(e, msg="getting account information")
return account_id
def get_current_function(connection, function_name, qualifier=None):
try:
if qualifier is not None:
return connection.get_function(FunctionName=function_name, Qualifier=qualifier)
return connection.get_function(FunctionName=function_name)
except ClientError as e:
try:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return None
except (KeyError, AttributeError):
pass
raise e
def sha256sum(filename):
# Returns the base64-encoded SHA-256 digest of the file contents (not a hex
# digest, despite the variable name below); AWS Lambda reports CodeSha256 in
# the same base64 form, so the two values can be compared directly.
hasher = hashlib.sha256()
with open(filename, 'rb') as f:
hasher.update(f.read())
code_hash = hasher.digest()
code_b64 = base64.b64encode(code_hash)
hex_digest = code_b64.decode('utf-8')
return hex_digest
def set_tag(client, module, tags, function):
if not hasattr(client, "list_tags"):
module.fail_json(msg="Using tags requires botocore 1.5.40 or above")
changed = False
arn = function['Configuration']['FunctionArn']
try:
current_tags = client.list_tags(Resource=arn).get('Tags', {})
except ClientError as e:
module.fail_json(msg="Unable to list tags: {0}".format(to_native(e)),
exception=traceback.format_exc())
tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=True)
try:
if tags_to_remove:
client.untag_resource(
Resource=arn,
TagKeys=tags_to_remove
)
changed = True
if tags_to_add:
client.tag_resource(
Resource=arn,
Tags=tags_to_add
)
changed = True
except ClientError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc())
return changed
def main():
argument_spec = dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
runtime=dict(),
role=dict(),
handler=dict(),
zip_file=dict(aliases=['src']),
s3_bucket=dict(),
s3_key=dict(),
s3_object_version=dict(),
description=dict(default=''),
timeout=dict(type='int', default=3),
memory_size=dict(type='int', default=128),
vpc_subnet_ids=dict(type='list'),
vpc_security_group_ids=dict(type='list'),
environment_variables=dict(type='dict'),
dead_letter_arn=dict(),
tags=dict(type='dict'),
)
mutually_exclusive = [['zip_file', 's3_key'],
['zip_file', 's3_bucket'],
['zip_file', 's3_object_version']]
required_together = [['s3_key', 's3_bucket'],
['vpc_subnet_ids', 'vpc_security_group_ids']]
required_if = [['state', 'present', ['runtime', 'handler', 'role']]]
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
required_if=required_if)
name = module.params.get('name')
state = module.params.get('state').lower()
runtime = module.params.get('runtime')
role = module.params.get('role')
handler = module.params.get('handler')
s3_bucket = module.params.get('s3_bucket')
s3_key = module.params.get('s3_key')
s3_object_version = module.params.get('s3_object_version')
zip_file = module.params.get('zip_file')
description = module.params.get('description')
timeout = module.params.get('timeout')
memory_size = module.params.get('memory_size')
vpc_subnet_ids = module.params.get('vpc_subnet_ids')
vpc_security_group_ids = module.params.get('vpc_security_group_ids')
environment_variables = module.params.get('environment_variables')
dead_letter_arn = module.params.get('dead_letter_arn')
tags = module.params.get('tags')
check_mode = module.check_mode
changed = False
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
try:
client = boto3_conn(module, conn_type='client', resource='lambda',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (ClientError, ValidationError) as e:
module.fail_json_aws(e, msg="Trying to connect to AWS")
if state == 'present':
if role.startswith('arn:aws:iam'):
role_arn = role
else:
# get account ID and assemble ARN
account_id = get_account_id(module, region=region, endpoint=ec2_url, **aws_connect_kwargs)
role_arn = 'arn:aws:iam::{0}:role/{1}'.format(account_id, role)
# Get function configuration if present, False otherwise
current_function = get_current_function(client, name)
# Update existing Lambda function
if state == 'present' and current_function:
# Get current state
current_config = current_function['Configuration']
current_version = None
# Update function configuration
func_kwargs = {'FunctionName': name}
# Update configuration if needed
if role_arn and current_config['Role'] != role_arn:
func_kwargs.update({'Role': role_arn})
if handler and current_config['Handler'] != handler:
func_kwargs.update({'Handler': handler})
if description and current_config['Description'] != description:
func_kwargs.update({'Description': description})
if timeout and current_config['Timeout'] != timeout:
func_kwargs.update({'Timeout': timeout})
if memory_size and current_config['MemorySize'] != memory_size:
func_kwargs.update({'MemorySize': memory_size})
if (environment_variables is not None) and (current_config.get(
'Environment', {}).get('Variables', {}) != environment_variables):
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn is not None:
if current_config.get('DeadLetterConfig'):
if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
else:
if dead_letter_arn != "":
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# Check for unsupported mutation
if current_config['Runtime'] != runtime:
module.fail_json(msg='Cannot change runtime. Please recreate the function')
# If VPC configuration is desired
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
if 'VpcConfig' in current_config:
# Compare VPC config with current config
current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']
subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)
if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
new_vpc_config = {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}
func_kwargs.update({'VpcConfig': new_vpc_config})
else:
# No VPC configuration is desired, assure VPC config is empty when present in current config
if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})
# Upload new configuration if configuration has changed
if len(func_kwargs) > 1:
try:
if not check_mode:
response = client.update_function_configuration(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to update lambda configuration")
# Update code configuration
code_kwargs = {'FunctionName': name, 'Publish': True}
# Update S3 location
if s3_bucket and s3_key:
# If function is stored on S3 always update
code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})
# If S3 Object Version is given
if s3_object_version:
code_kwargs.update({'S3ObjectVersion': s3_object_version})
# Compare local checksum, update remote code when different
elif zip_file:
local_checksum = sha256sum(zip_file)
remote_checksum = current_config['CodeSha256']
# Only upload new code when local code is different compared to the remote code
if local_checksum != remote_checksum:
try:
with open(zip_file, 'rb') as f:
encoded_zip = f.read()
code_kwargs.update({'ZipFile': encoded_zip})
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
# Tag Function
if tags is not None:
if set_tag(client, module, tags, current_function):
changed = True
# Upload new code if needed (e.g. code checksum has changed)
if len(code_kwargs) > 2:
try:
if not check_mode:
response = client.update_function_code(**code_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to upload new code")
# Describe function code and configuration
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after updating')
# We're done
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
# Function doesn't exists, create new Lambda function
elif state == 'present':
if s3_bucket and s3_key:
# If function is stored on S3
code = {'S3Bucket': s3_bucket,
'S3Key': s3_key}
if s3_object_version:
code.update({'S3ObjectVersion': s3_object_version})
elif zip_file:
# If function is stored in local zipfile
try:
with open(zip_file, 'rb') as f:
zip_content = f.read()
code = {'ZipFile': zip_content}
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
else:
module.fail_json(msg='Either S3 object or path to zipfile required')
func_kwargs = {'FunctionName': name,
'Publish': True,
'Runtime': runtime,
'Role': role_arn,
'Code': code,
'Timeout': timeout,
'MemorySize': memory_size,
}
if description is not None:
func_kwargs.update({'Description': description})
if handler is not None:
func_kwargs.update({'Handler': handler})
if environment_variables:
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# If VPC configuration is given
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}})
# Finally try to create function
current_version = None
try:
if not check_mode:
response = client.create_function(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to create function")
# Tag Function
if tags is not None:
if set_tag(client, module, tags, get_current_function(client, name)):
changed = True
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after creating')
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
# Delete existing Lambda function
if state == 'absent' and current_function:
try:
if not check_mode:
client.delete_function(FunctionName=name)
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to delete Lambda function")
module.exit_json(changed=changed)
# Function already absent, do nothing
elif state == 'absent':
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
|
gpl-3.0
|
RedHatQE/cfme_tests
|
cfme/tests/control/test_control_simulation.py
|
1
|
1824
|
import pytest
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import ONE_PER_CATEGORY
from cfme.utils.appliance.implementations.ui import navigate_to
pytestmark = [
pytest.mark.long_running,
pytest.mark.provider(classes=[InfraProvider], selector=ONE_PER_CATEGORY),
pytest.mark.usefixtures("setup_provider"),
test_requirements.control,
]
FILL_DATA = {
"event_type": "Datastore Operation",
"event_value": "Datastore Analysis Complete",
"filter_type": "By Clusters",
"filter_value": "Cluster",
"submit_button": True
}
@pytest.mark.tier(1)
def test_control_icons_simulation(appliance):
"""
Bugzillas:
* 1349147, 1690572
Polarion:
assignee: jdupuy
casecomponent: Control
caseimportance: medium
initialEstimate: 1/15h
testSteps:
1. Have an infrastructure provider
2. Go to Control -> Simulation
3. Select:
Type: Datastore Operation
Event: Datastore Analysis Complete
VM Selection: By Clusters, Default
4. Submit
5. Check for all icons in this page
expectedResults:
1.
2.
3.
4.
5. All the icons should be present
"""
view = navigate_to(appliance.server, "ControlSimulation")
view.fill(FILL_DATA)
# Now check all the icons
assert view.simulation_results.squash_button.is_displayed
# Check the tree icons
tree = view.simulation_results.tree
# Check the root_item
assert tree.image_getter(tree.root_item)
# Check all the child items
for child_item in tree.child_items(tree.root_item):
assert tree.image_getter(child_item)
|
gpl-2.0
|
kmmartins/xbmc
|
addons/service.xbmc.versioncheck/lib/aptdeamonhandler.py
|
177
|
3661
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import xbmc
from common import *
try:
#import apt
import apt
from aptdaemon import client
from aptdaemon import errors
except:
log('python apt import error')
class AptdeamonHandler:
def __init__(self):
self.aptclient = client.AptClient()
def _check_versions(self, package):
if not self._update_cache():
return False, False
try:
trans = self.aptclient.upgrade_packages([package])
#trans = self.aptclient.upgrade_packages("bla")
trans.simulate(reply_handler=self._apttransstarted, error_handler=self._apterrorhandler)
pkg = trans.packages[4][0]
if pkg == package:
cache=apt.Cache()
cache.open(None)
cache.upgrade()
if cache[pkg].installed:
return cache[pkg].installed.version, cache[pkg].candidate.version
return False, False
except Exception as error:
log("Exception while checking versions: %s" %error)
return False, False
def _update_cache(self):
try:
if self.aptclient.update_cache(wait=True) == "exit-success":
return True
else:
return False
except errors.NotAuthorizedError:
log("You are not allowed to update the cache")
return False
def check_upgrade_available(self, package):
'''returns True if newer package is available in the repositories'''
installed, candidate = self._check_versions(package)
if installed and candidate:
if installed != candidate:
log("Version installed %s" %installed)
log("Version available %s" %candidate)
return True
else:
log("Already on newest version")
elif not installed:
log("No installed package found")
return False
else:
return False
def upgrade_package(self, package):
try:
log("Installing new version")
if self.aptclient.upgrade_packages([package], wait=True) == "exit-success":
log("Upgrade successful")
return True
except Exception as error:
log("Exception during upgrade: %s" %error)
return False
def upgrade_system(self):
try:
log("Upgrading system")
if self.aptclient.upgrade_system(wait=True) == "exit-success":
return True
except Exception as error:
log("Exception during system upgrade: %s" %error)
return False
def _getpassword(self):
if len(self._pwd) == 0:
self._pwd = get_password_from_user()
return self._pwd
def _apttransstarted(self):
pass
def _apterrorhandler(self, error):
log("Apt Error %s" %error)
|
gpl-2.0
|
jeffreyzli/pokerbot-2017
|
MiniTourn_Triple/GameData.py
|
1
|
15989
|
import HandRankings as Hand
from deuces.deuces import Card, Evaluator
class GameData:
def __init__(self, name, opponent_name, stack_size, bb):
# match stats
self.name = name
self.opponent_name = opponent_name
self.starting_stack_size = int(stack_size)
self.num_hands = 0
self.num_wins = 0
self.num_flop = 0
self.big_blind = int(bb)
# self pre-flop stats
self.pfr = 0
self.vpip = 0
self.three_bet = 0
self.fold_big_bet = 0
# opponent pre-flop stats
self.opponent_pfr = 0
self.opponent_vpip = 0
self.opponent_three_bet = 0
self.opponent_fold_pfr = 0
self.opponent_fold_three_bet = 0
# self post-flop stats
self.aggression_factor = False
self.showdown = 0
self.c_bet = 0
self.showdown_win = 0
self.double_barrel = 0
self.discarded_card = None
# opponent post-flop stats
self.opponent_c_bet = 0
self.opponent_fold_c_bet = 0
self.opponent_double_barrel = 0
# current hand stats
self.button = True
self.current_pot_size = 0
self.current_hand = []
self.current_hand_strength = 0.0
self.hand_class = ''
self.hand_score = 0
self.current_game_state = ''
self.board_cards = []
self.board_score = 0
self.last_actions = []
self.current_legal_actions = []
self.has_called = False
self.opponent_has_called = False
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.street_dict = {'0': 0, '3': 0, '4': 0, '5': 0}
self.discard = False
self.has_five_bet = False
self.has_bet_aggressively = False
self.time_bank = 0.0
self.opc = 0
self.current_stack_size = self.starting_stack_size
def new_hand(self, data_list):
self.num_hands += 1
self.button = data_list[2]
if "true" in self.button:
self.button = True
else:
self.button = False
self.current_hand = [data_list[3], data_list[4]]
self.current_hand_strength = Hand.hand_win_odds(self.current_hand)
self.current_game_state = 'PREFLOP'
self.board_cards = []
self.last_actions = []
self.current_legal_actions = []
self.street_dict = {'0': 0, '3': 0, '4': 0, '5': 0}
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.aggression_factor = False
self.discarded_card = None
def get_action(self, data_list):
self.current_pot_size = int(data_list[1])
self.opc = self.starting_stack_size - self.current_stack_size
self.time_bank = float(data_list[-1])
num_board_cards = int(data_list[2])
self.street_dict[str(num_board_cards)] += 1
if self.current_game_state == 'PREFLOP':
if self.street_dict['3'] > 0 and self.street_dict['4'] == 0:
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.current_game_state = 'FLOPTURN'
self.num_flop += 1
elif self.current_game_state == 'FLOPTURN':
if self.street_dict['4'] > 0 and self.street_dict['5'] == 0:
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.current_game_state = 'TURNRIVER'
elif self.current_game_state == 'TURNRIVER':
if self.street_dict['5'] > 0:
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.current_game_state = 'POSTRIVER'
index = 3 + num_board_cards
num_last_actions = int(data_list[index])
index += 1
current_last_actions = []
for i in range(num_last_actions):
current_last_actions.append(data_list[index + i])
self.last_actions.append(current_last_actions)
if self.discard:
for action in current_last_actions:
if 'DISCARD' in action and self.name in action:
old_card = action[8:10]
new_card = action[11:13]
self.current_hand[self.current_hand.index(old_card)] = new_card
self.current_hand_strength = Hand.hand_win_odds(self.current_hand)
self.discard = False
break
for i in range(num_board_cards):
board_card = data_list[3 + i]
if board_card not in self.board_cards:
self.board_cards.append(data_list[3 + i])
if num_board_cards > 0:
board_cards = []
for board_card in self.board_cards:
board_cards.append(Card.new(board_card))
hand = []
for card in self.current_hand:
hand.append(Card.new(card))
self.hand_score = Evaluator().evaluate(hand, board_cards)
self.hand_class = Evaluator().class_to_string(Evaluator().get_rank_class(self.hand_score))
if num_board_cards == 5:
two_board_cards = []
three_board_cards = []
for i in range(2):
two_board_cards.append(Card.new(self.board_cards[i]))
for i in range(2, 5):
three_board_cards.append(Card.new(self.board_cards[i]))
self.board_score = Evaluator().evaluate(two_board_cards, three_board_cards)
if self.current_game_state == 'PREFLOP':
if self.current_pot_size == 4:
if self.button:
self.vpip += 1
self.has_called = True
else:
self.opponent_vpip += 1
self.opponent_has_called = True
else:
for action in current_last_actions:
if 'RAISE' in action:
round_num = self.street_dict['0']
if round_num == 1:
self.opponent_pfr += 1
self.opponent_vpip += 1
self.opponent_has_two_bet = True
elif round_num == 2:
if self.button:
if self.name in action:
self.pfr += 1
self.vpip += 1
self.has_two_bet = True
else:
self.opponent_pfr += 1
self.opponent_vpip += 1
self.opponent_has_three_bet = True
else:
if self.name in action:
self.pfr += 1
self.vpip += 1
self.has_three_bet = True
else:
self.opponent_pfr += 1
self.opponent_vpip += 1
self.opponent_has_four_bet = True
elif round_num == 3:
if self.name in action:
self.pfr += 1
self.vpip += 1
elif 'CALL' in action:
if self.name in action:
self.vpip += 1
else:
self.opponent_vpip += 1
elif self.current_game_state == 'FLOPTURN':
round_num = self.street_dict['3']
if round_num == 1:
self.discard = True
elif round_num == 2:
for action in current_last_actions:
if 'BET' in action:
self.opponent_c_bet += 1
break
elif round_num == 3:
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.c_bet += 1
else:
self.opponent_c_bet += 1
elif 'RAISE' in action:
if self.name in action:
self.has_two_bet = True
else:
if self.button:
self.opponent_has_three_bet = True
else:
self.opponent_has_two_bet = True
elif round_num == 4:
for action in current_last_actions:
if 'RAISE' in action:
if self.name in action:
if self.button:
self.has_four_bet = True
else:
self.has_three_bet = True
break
elif self.current_game_state == 'TURNRIVER':
round_num = self.street_dict['4']
if round_num == 1:
self.discard = True
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.c_bet += 1
else:
self.opponent_c_bet += 1
break
elif round_num == 2:
for action in current_last_actions:
if 'BET' in action:
self.opponent_c_bet += 1
break
elif round_num == 3:
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.c_bet += 1
else:
self.opponent_c_bet += 1
elif 'RAISE' in action:
if self.name in action:
self.has_two_bet = True
else:
if self.button:
self.opponent_has_three_bet = True
else:
self.opponent_has_two_bet = True
elif round_num == 4:
for action in current_last_actions:
if 'RAISE' in action:
if self.name in action:
if self.button:
self.has_four_bet = True
else:
self.has_three_bet = True
break
elif self.current_game_state == 'POSTRIVER':
round_num = self.street_dict['5']
if round_num == 1:
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.double_barrel += 1
else:
self.opponent_double_barrel += 1
break
index += num_last_actions
num_legal_actions = int(data_list[index])
index += 1
self.current_legal_actions = []
for i in range(num_legal_actions):
self.current_legal_actions.append(data_list[index + i])
def legal_action(self, action):
for legal_action in self.current_legal_actions:
if action in legal_action:
if action == 'BET' or action == 'RAISE':
index = legal_action.index(':') + 1
sub = legal_action[index:]
index = sub.index(':')
return [int(sub[:index]), int(sub[index+1:])]
if action == 'CALL':
for last_action in self.last_actions[-1]:
if 'RAISE' in last_action and self.opponent_name in last_action:
sub = last_action[last_action.index(':')+1:]
return int(sub[:sub.index(':')])
return True
return None
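    # Illustrative sketch of the strings legal_action() expects (the exact engine
    # wire format is an assumption inferred from the slicing above): legal actions
    # look like 'RAISE:minAmount:maxAmount', and last actions like 'RAISE:amount:playerName'.
    #
    #     gd = GameData('hero', 'villain', stack_size='200', bb='2')
    #     gd.current_legal_actions = ['FOLD', 'CALL', 'RAISE:4:200']
    #     gd.last_actions = [['RAISE:4:villain']]
    #     gd.legal_action('RAISE')  # -> [4, 200]
    #     gd.legal_action('CALL')   # -> 4 (amount to call)
    #     gd.legal_action('CHECK')  # -> None (not currently legal)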
def hand_over(self, data_list):
        num_board_cards = int(data_list[3])
        index = 4 + num_board_cards
        num_last_actions = int(data_list[index])
        index += 1
        current_last_actions = []
        for i in range(num_last_actions):
            current_last_actions.append(data_list[index + i])
if self.current_game_state == 'PREFLOP':
for action in current_last_actions:
if 'FOLD' in action and self.opponent_name in action:
if self.button:
for last_action in self.last_actions[-1]:
if 'RAISE' in last_action and self.name in last_action:
self.opponent_fold_pfr += 1
if self.has_three_bet and not self.has_four_bet:
self.opponent_fold_three_bet += 1
self.num_wins += 1
else:
for last_action in current_last_actions:
if 'RAISE' in last_action and self.name in last_action:
self.opponent_fold_pfr += 1
if self.has_three_bet and not self.has_four_bet:
self.opponent_fold_three_bet += 1
self.num_wins += 1
elif self.current_game_state == 'FLOPTURN':
for action in current_last_actions:
if self.button:
if 'FOLD' in action and self.opponent_name in action:
for last_action in self.last_actions[-1]:
if 'BET' in last_action and self.name in last_action:
self.opponent_fold_c_bet += 1
self.num_wins += 1
else:
if 'FOLD' in action and self.opponent_name in action:
for last_action in current_last_actions:
if 'BET' in last_action and self.name in last_action:
self.opponent_fold_c_bet += 1
self.num_wins += 1
elif self.current_game_state == 'POSTRIVER':
for action in current_last_actions:
if 'WIN' in action:
if self.name in action:
self.num_wins += 1
for last_action in current_last_actions:
if 'SHOW' in last_action:
self.showdown += 1
self.showdown_win += 1
break
break
|
mit
|
JCA-Developpement/Odoo
|
addons/project_issue/project_issue.py
|
5
|
31214
|
#-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import calendar
from datetime import datetime,date
from dateutil import relativedelta
import json
import time
from openerp import api
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import html2plaintext
from openerp.tools.translate import _
class project_issue_version(osv.Model):
_name = "project.issue.version"
_order = "name desc"
_columns = {
'name': fields.char('Version Number', required=True),
'active': fields.boolean('Active', required=False),
}
_defaults = {
'active': 1,
}
class project_issue(osv.Model):
_name = "project.issue"
_description = "Project Issue"
_order = "priority desc, create_date desc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
_track = {
'stage_id': {
            # this is only a heuristic; depending on your particular stage configuration it may not match all 'new' stages
'project_issue.mt_issue_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
'project_issue.mt_issue_stage': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence > 1,
},
'user_id': {
'project_issue.mt_issue_assigned': lambda self, cr, uid, obj, ctx=None: obj.user_id and obj.user_id.id,
},
'kanban_state': {
'project_issue.mt_issue_blocked': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'blocked',
'project_issue.mt_issue_ready': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'done',
},
}
def _get_default_partner(self, cr, uid, context=None):
project_id = self._get_default_project_id(cr, uid, context)
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default project by checking if present in the context """
return self._resolve_project_id_from_context(cr, uid, context=context)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
project_id = self._get_default_project_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], project_id, [('fold', '=', False)], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
if type(context.get('default_project_id')) in (int, long):
return context.get('default_project_id')
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return int(project_ids[0][0])
return None
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
access_rights_uid = access_rights_uid or uid
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
# lame hack to allow reverting search, should just work in the trivial case
if read_group_order == 'stage_id desc':
order = "%s desc" % order
# retrieve section_id from the context and write the domain
# - ('id', 'in', 'ids'): add columns that should be present
# - OR ('case_default', '=', True), ('fold', '=', False): add default columns that are not folded
# - OR ('project_ids', 'in', project_id), ('fold', '=', False) if project_id: add project columns that are not folded
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('project_ids', '=', project_id)]
search_domain += [('id', 'in', ids)]
# perform search
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
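    # Note: _group_by_full at the bottom of this class maps 'stage_id' to the method
    # above, so grouped/kanban views list every matching stage column (in stage order,
    # with its fold flag), even when no issue currently sits in that stage.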
def _compute_day(self, cr, uid, ids, fields, args, context=None):
"""
        @param cr: database cursor
        @param uid: current user's ID, for security checks
        @param ids: list of project.issue IDs
        @param context: a standard dictionary for contextual values
        @return: dict mapping each issue id to its computed duration/inactivity values
"""
Calendar = self.pool['resource.calendar']
        res = dict((issue_id, {}) for issue_id in ids)
for issue in self.browse(cr, uid, ids, context=context):
values = {
'day_open': 0.0, 'day_close': 0.0,
'working_hours_open': 0.0, 'working_hours_close': 0.0,
'days_since_creation': 0.0, 'inactivity_days': 0.0,
}
# if the working hours on the project are not defined, use default ones (8 -> 12 and 13 -> 17 * 5), represented by None
calendar_id = None
if issue.project_id and issue.project_id.resource_calendar_id:
calendar_id = issue.project_id.resource_calendar_id.id
dt_create_date = datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)
if issue.date_open:
dt_date_open = datetime.strptime(issue.date_open, DEFAULT_SERVER_DATETIME_FORMAT)
values['day_open'] = (dt_date_open - dt_create_date).total_seconds() / (24.0 * 3600)
values['working_hours_open'] = Calendar._interval_hours_get(
cr, uid, calendar_id, dt_create_date, dt_date_open,
timezone_from_uid=issue.user_id.id or uid,
exclude_leaves=False, context=context)
if issue.date_closed:
dt_date_closed = datetime.strptime(issue.date_closed, DEFAULT_SERVER_DATETIME_FORMAT)
values['day_close'] = (dt_date_closed - dt_create_date).total_seconds() / (24.0 * 3600)
values['working_hours_close'] = Calendar._interval_hours_get(
cr, uid, calendar_id, dt_create_date, dt_date_closed,
timezone_from_uid=issue.user_id.id or uid,
exclude_leaves=False, context=context)
days_since_creation = datetime.today() - dt_create_date
values['days_since_creation'] = days_since_creation.days
if issue.date_action_last:
inactive_days = datetime.today() - datetime.strptime(issue.date_action_last, DEFAULT_SERVER_DATETIME_FORMAT)
elif issue.date_last_stage_update:
inactive_days = datetime.today() - datetime.strptime(issue.date_last_stage_update, DEFAULT_SERVER_DATETIME_FORMAT)
else:
inactive_days = datetime.today() - datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)
values['inactivity_days'] = inactive_days.days
# filter only required values
for field in fields:
res[issue.id][field] = values[field]
return res
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
task_pool = self.pool.get('project.task')
res = {}
for issue in self.browse(cr, uid, ids, context=context):
progress = 0.0
if issue.task_id:
progress = task_pool._hours_get(cr, uid, [issue.task_id.id], field_names, args, context=context)[issue.task_id.id]['progress']
res[issue.id] = {'progress' : progress}
return res
def _can_escalate(self, cr, uid, ids, field_name, arg, context=None):
res = {}
        for issue in self.browse(cr, uid, ids, context=context):
            res[issue.id] = issue.project_id.parent_id.type == 'contract'
return res
def on_change_project(self, cr, uid, ids, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def _get_issue_task(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for task in self.pool.get('project.task').browse(cr, uid, ids, context=context):
issues += issue_pool.search(cr, uid, [('task_id','=',task.id)])
return issues
def _get_issue_work(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id:
issues += issue_pool.search(cr, uid, [('task_id','=',work.task_id.id)])
return issues
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Issue', required=True),
'active': fields.boolean('Active', required=False),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'write_date': fields.datetime('Update Date', readonly=True),
'days_since_creation': fields.function(_compute_day, string='Days since creation date', \
multi='compute_day', type="integer", help="Difference in days between creation date and current date"),
'date_deadline': fields.date('Deadline'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which Case belongs to.\
Define Responsible user and Email account for mail gateway.'),
'partner_id': fields.many2one('res.partner', 'Contact', select=1),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Private Note'),
'kanban_state': fields.selection([('normal', 'Normal'),('blocked', 'Blocked'),('done', 'Ready for next stage')], 'Kanban State',
track_visibility='onchange',
help="A Issue's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this issue\n"
" * Ready for next stage indicates the issue is ready to be pulled to the next stage",
required=False),
'email_from': fields.char('Email', size=128, help="These people will receive email.", select=1),
'email_cc': fields.char('Watchers Emails', size=256, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'date_open': fields.datetime('Assigned', readonly=True, select=True),
# Project Issue fields
'date_closed': fields.datetime('Closed', readonly=True, select=True),
'date': fields.datetime('Date'),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True),
'channel': fields.char('Channel', help="Communication channel."),
'categ_ids': fields.many2many('project.category', string='Tags'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority', select=True),
'version_id': fields.many2one('project.issue.version', 'Version'),
'stage_id': fields.many2one ('project.task.type', 'Stage',
track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'project_id': fields.many2one('project.project', 'Project', track_visibility='onchange', select=True),
'duration': fields.float('Duration'),
'task_id': fields.many2one('project.task', 'Task', domain="[('project_id','=',project_id)]"),
'day_open': fields.function(_compute_day, string='Days to Assign',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'day_close': fields.function(_compute_day, string='Days to Close',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'user_id': fields.many2one('res.users', 'Assigned to', required=False, select=1, track_visibility='onchange'),
'working_hours_open': fields.function(_compute_day, string='Working Hours to assign the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'working_hours_close': fields.function(_compute_day, string='Working Hours to close the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'inactivity_days': fields.function(_compute_day, string='Days since last action',
multi='compute_day', type="integer", help="Difference in days between last action and current date"),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'can_escalate': fields.function(_can_escalate, type='boolean', string='Can Escalate'),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="Computed as: Time Spent / Total Time.",
store = {
'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['task_id'], 10),
'project.task': (_get_issue_task, ['progress'], 10),
'project.task.work': (_get_issue_work, ['hours'], 10),
}),
}
_defaults = {
'active': 1,
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': '0',
'kanban_state': 'normal',
'date_last_stage_update': fields.datetime.now,
'user_id': lambda obj, cr, uid, context: uid,
}
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def copy(self, cr, uid, id, default=None, context=None):
issue = self.read(cr, uid, [id], ['name'], context=context)[0]
if not default:
default = {}
default = default.copy()
default.update(name=_('%s (copy)') % (issue['name']))
return super(project_issue, self).copy(cr, uid, id, default=default, context=context)
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
if vals.get('user_id'):
vals['date_open'] = fields.datetime.now()
if 'stage_id' in vals:
vals.update(self.onchange_stage_id(cr, uid, None, vals.get('stage_id'), context=context)['value'])
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
return super(project_issue, self).create(cr, uid, vals, context=create_context)
def write(self, cr, uid, ids, vals, context=None):
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals.update(self.onchange_stage_id(cr, uid, ids, vals.get('stage_id'), context=context)['value'])
vals['date_last_stage_update'] = fields.datetime.now()
if 'kanban_state' not in vals:
vals['kanban_state'] = 'normal'
# user_id change: update date_start
if vals.get('user_id'):
vals['date_open'] = fields.datetime.now()
return super(project_issue, self).write(cr, uid, ids, vals, context)
def onchange_task_id(self, cr, uid, ids, task_id, context=None):
if not task_id:
return {'value': {}}
task = self.pool.get('project.task').browse(cr, uid, task_id, context=context)
return {'value': {'user_id': task.user_id.id, }}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
""" This function returns value of partner email address based on partner
:param part: Partner's id
"""
result = {}
if partner_id:
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context)
result['email_from'] = partner.email
return {'value': result}
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_document_name'] = _("issues")
return super(project_issue, self).get_empty_list_help(cr, uid, help, context=context)
# -------------------------------------------------------
# Stage management
# -------------------------------------------------------
def onchange_stage_id(self, cr, uid, ids, stage_id, context=None):
if not stage_id:
return {'value': {}}
stage = self.pool['project.task.type'].browse(cr, uid, stage_id, context=context)
if stage.fold:
return {'value': {'date_closed': fields.datetime.now()}}
return {'value': {'date_closed': False}}
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the issue:
- type: stage type must be the same or 'both'
- section_id: if set, stages must belong to this section or
be a default case
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
# OR all section_ids and OR with case_default
search_domain = []
if section_ids:
search_domain += [('|')] * (len(section_ids)-1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
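    # Sketch of the domain built above: with section_ids == [7, 12] and
    # domain == [('fold', '=', False)], stage_find searches with
    #     ['|', ('project_ids', '=', 7), ('project_ids', '=', 12), ('fold', '=', False)]
    # i.e. prefix-notation OR over the collected project ids, then the extra criteria.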
def case_escalate(self, cr, uid, ids, context=None): # FIXME rename this method to issue_escalate
for issue in self.browse(cr, uid, ids, context=context):
data = {}
esc_proj = issue.project_id.project_escalation_id
if not esc_proj:
raise osv.except_osv(_('Warning!'), _('You cannot escalate this issue.\nThe relevant Project has not configured the Escalation Project!'))
data['project_id'] = esc_proj.id
if esc_proj.user_id:
data['user_id'] = esc_proj.user_id.id
issue.write(data)
if issue.task_id:
issue.task_id.write({'project_id': esc_proj.id, 'user_id': False})
return True
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
issues = self.browse(cr, SUPERUSER_ID, ids, context=context)
project_ids = set([issue.project_id.id for issue in issues if issue.project_id])
aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), context=context)
return dict((issue.id, aliases.get(issue.project_id and issue.project_id.id or 0, False)) for issue in issues)
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(project_issue, self).message_get_suggested_recipients(cr, uid, ids, context=context)
try:
for issue in self.browse(cr, uid, ids, context=context):
if issue.partner_id:
self._message_add_suggested_recipient(cr, uid, recipients, issue, partner=issue.partner_id, reason=_('Customer'))
elif issue.email_from:
self._message_add_suggested_recipient(cr, uid, recipients, issue, email=issue.email_from, reason=_('Customer Email'))
except (osv.except_osv, orm.except_orm): # no read access rights -> just ignore suggested recipients because this imply modifying followers
pass
return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
context = dict(context or {}, state_to='draft')
defaults = {
'name': msg.get('subject') or _("No Subject"),
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
'user_id': False,
}
defaults.update(custom_values)
res_id = super(project_issue, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
return res_id
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, content_subtype='html', **kwargs):
""" Overrides mail_thread message_post so that we can set the date of last action field when
a new message is posted on the issue.
"""
if context is None:
context = {}
res = super(project_issue, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
if thread_id and subtype:
self.write(cr, SUPERUSER_ID, thread_id, {'date_action_last': fields.datetime.now()}, context=context)
return res
class project(osv.Model):
_inherit = "project.project"
def _get_alias_models(self, cr, uid, context=None):
return [('project.task', "Tasks"), ("project.issue", "Issues")]
def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
Issue = self.pool['project.issue']
return {
project_id: Issue.search_count(cr,uid, [('project_id', '=', project_id), ('stage_id.fold', '=', False)], context=context)
for project_id in ids
}
def _get_project_issue_data(self, cr, uid, ids, field_name, arg, context=None):
obj = self.pool['project.issue']
month_begin = date.today().replace(day=1)
date_begin = (month_begin - relativedelta.relativedelta(months=self._period_number - 1)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1]).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
res = {}
for id in ids:
created_domain = [('project_id', '=', id), ('date_closed', '>=', date_begin ), ('date_closed', '<=', date_end )]
res[id] = json.dumps(self.__get_bar_values(cr, uid, obj, created_domain, [ 'date_closed'], 'date_closed_count', 'date_closed', context=context))
return res
_columns = {
'project_escalation_id': fields.many2one('project.project', 'Project Escalation',
help='If any issue is escalated from the current Project, it will be listed under the project selected here.',
states={'close': [('readonly', True)], 'cancelled': [('readonly', True)]}),
'issue_count': fields.function(_issue_count, type='integer', string="Issues",),
'issue_ids': fields.one2many('project.issue', 'project_id',
domain=[('date_closed', '!=', False)]),
'monthly_issues': fields.function(_get_project_issue_data, type='char', readonly=True,
string='Project Issue By Month')
}
def _check_escalation(self, cr, uid, ids, context=None):
project_obj = self.browse(cr, uid, ids[0], context=context)
if project_obj.project_escalation_id:
if project_obj.project_escalation_id.id == project_obj.id:
return False
return True
_constraints = [
(_check_escalation, 'Error! You cannot assign escalation to the same project!', ['project_escalation_id'])
]
class account_analytic_account(osv.Model):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
_columns = {
'use_issues': fields.boolean('Issues', help="Check this field if this project manages issues"),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_issues'] = template.use_issues
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
if context is None:
context = {}
res = super(account_analytic_account, self)._trigger_project_creation(cr, uid, vals, context=context)
return res or (vals.get('use_issues') and not 'project_creation_in_progress' in context)
def unlink(self, cr, uid, ids, context=None):
proj_ids = self.pool['project.project'].search(cr, uid, [('analytic_account_id', 'in', ids)])
has_issues = self.pool['project.issue'].search(cr, uid, [('project_id', 'in', proj_ids)], count=True, context=context)
if has_issues:
raise osv.except_osv(_('Warning!'), _('Please remove existing issues in the project linked to the accounts you want to delete.'))
return super(account_analytic_account, self).unlink(cr, uid, ids, context=context)
class project_project(osv.Model):
_inherit = 'project.project'
_defaults = {
'use_issues': True
}
def _check_create_write_values(self, cr, uid, vals, context=None):
""" Perform some check on values given to create or write. """
# Handle use_tasks / use_issues: if only one is checked, alias should take the same model
if vals.get('use_tasks') and not vals.get('use_issues'):
vals['alias_model'] = 'project.task'
elif vals.get('use_issues') and not vals.get('use_tasks'):
vals['alias_model'] = 'project.issue'
def on_change_use_tasks_or_issues(self, cr, uid, ids, use_tasks, use_issues, context=None):
values = {}
if use_tasks and not use_issues:
values['alias_model'] = 'project.task'
elif not use_tasks and use_issues:
            values['alias_model'] = 'project.issue'
return {'value': values}
def create(self, cr, uid, vals, context=None):
self._check_create_write_values(cr, uid, vals, context=context)
return super(project_project, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
self._check_create_write_values(cr, uid, vals, context=context)
return super(project_project, self).write(cr, uid, ids, vals, context=context)
class res_partner(osv.osv):
def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
Issue = self.pool['project.issue']
return {
partner_id: Issue.search_count(cr,uid, [('partner_id', '=', partner_id)])
for partner_id in ids
}
""" Inherits partner and adds Issue information in the partner form """
_inherit = 'res.partner'
_columns = {
'issue_count': fields.function(_issue_count, string='# Issues', type='integer'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jonathan-beard/edx-platform
|
common/lib/xmodule/xmodule/modulestore/tests/factories.py
|
34
|
21631
|
"""
Factories for use in tests of XBlocks.
"""
import functools
import pymongo.message
import threading
import traceback
from collections import defaultdict
from decorator import contextmanager
from uuid import uuid4
from factory import Factory, Sequence, lazy_attribute_sequence, lazy_attribute
from factory.containers import CyclicDefinitionError
from mock import patch
from nose.tools import assert_less_equal, assert_greater_equal
import dogstats_wrapper as dog_stats_api
from opaque_keys.edx.locations import Location
from opaque_keys.edx.keys import UsageKey
from xblock.core import XBlock
from xmodule.modulestore import prefer_xmodules, ModuleStoreEnum
from xmodule.tabs import CourseTab
from xmodule.x_module import DEPRECATION_VSCOMPAT_EVENT
class Dummy(object):
pass
class XModuleFactoryLock(threading.local):
"""
This class exists to store whether XModuleFactory can be accessed in a safe
way (meaning, in a context where the data it creates will be cleaned up).
Users of XModuleFactory (or its subclasses) should only call XModuleFactoryLock.enable
after ensuring that a) the modulestore will be cleaned up, and b) that XModuleFactoryLock.disable
will be called.
"""
def __init__(self):
super(XModuleFactoryLock, self).__init__()
self._enabled = False
def enable(self):
"""
        Enable XModuleFactories. This should only be turned on in a context
where the modulestore will be reset at the end of the test (such
as inside ModuleStoreTestCase).
"""
self._enabled = True
def disable(self):
"""
Disable XModuleFactories. This should be called once the data
from the factory has been cleaned up.
"""
self._enabled = False
def is_enabled(self):
"""
Return whether XModuleFactories are enabled.
"""
return self._enabled
XMODULE_FACTORY_LOCK = XModuleFactoryLock()
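# Minimal usage sketch (hedged): the lock is normally toggled by a test base class
# such as ModuleStoreTestCase, but the manual equivalent looks like this.
#
#     XMODULE_FACTORY_LOCK.enable()
#     try:
#         course = CourseFactory.create()   # any XModuleFactory subclass
#     finally:
#         XMODULE_FACTORY_LOCK.disable()    # after the modulestore has been cleaned up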
class XModuleFactory(Factory):
"""
Factory for XModules
"""
# We have to give a Factory a FACTORY_FOR.
# However, the class that we create is actually determined by the category
# specified in the factory
FACTORY_FOR = Dummy
@lazy_attribute
def modulestore(self):
msg = "XMODULE_FACTORY_LOCK not enabled. Please use ModuleStoreTestCase as your test baseclass."
assert XMODULE_FACTORY_LOCK.is_enabled(), msg
from xmodule.modulestore.django import modulestore
return modulestore()
last_course = threading.local()
class CourseFactory(XModuleFactory):
"""
Factory for XModule courses.
"""
org = Sequence('org.{}'.format)
number = Sequence('course_{}'.format)
display_name = Sequence('Run {}'.format)
# pylint: disable=unused-argument
@classmethod
def _create(cls, target_class, **kwargs):
"""
Create and return a new course. For performance reasons, we do not emit
signals during this process, but if you need signals to run, you can
pass `emit_signals=True` to this method.
"""
# All class attributes (from this class and base classes) are
# passed in via **kwargs. However, some of those aren't actual field values,
# so pop those off for use separately
org = kwargs.pop('org', None)
# because the factory provides a default 'number' arg, prefer the non-defaulted 'course' arg if any
number = kwargs.pop('course', kwargs.pop('number', None))
store = kwargs.pop('modulestore')
name = kwargs.get('name', kwargs.get('run', Location.clean(kwargs.get('display_name'))))
run = kwargs.pop('run', name)
user_id = kwargs.pop('user_id', ModuleStoreEnum.UserID.test)
emit_signals = kwargs.get('emit_signals', False)
# Pass the metadata just as field=value pairs
kwargs.update(kwargs.pop('metadata', {}))
default_store_override = kwargs.pop('default_store', None)
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
course_key = store.make_course_key(org, number, run)
with store.bulk_operations(course_key, emit_signals=emit_signals):
if default_store_override is not None:
with store.default_store(default_store_override):
new_course = store.create_course(org, number, run, user_id, fields=kwargs)
else:
new_course = store.create_course(org, number, run, user_id, fields=kwargs)
last_course.loc = new_course.location
return new_course
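# Typical test usage (sketch; the field values are illustrative and assume the
# factory lock is enabled, e.g. inside a ModuleStoreTestCase):
#
#     course = CourseFactory.create(org='TestOrg', course='T101', run='2024_Spring',
#                                   display_name='Test Course')
#     # course.id is the CourseKey of the newly created course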
class LibraryFactory(XModuleFactory):
"""
Factory for creating a content library
"""
org = Sequence('org{}'.format)
library = Sequence('lib{}'.format)
display_name = Sequence('Test Library {}'.format)
# pylint: disable=unused-argument
@classmethod
def _create(cls, target_class, **kwargs):
"""
Create a library with a unique name and key.
All class attributes (from this class and base classes) are automagically
passed in via **kwargs.
"""
        # some of the kwargs aren't actual field values, so pop those off for use separately:
org = kwargs.pop('org')
library = kwargs.pop('library')
store = kwargs.pop('modulestore')
user_id = kwargs.pop('user_id', ModuleStoreEnum.UserID.test)
# Pass the metadata just as field=value pairs
kwargs.update(kwargs.pop('metadata', {}))
default_store_override = kwargs.pop('default_store', ModuleStoreEnum.Type.split)
with store.default_store(default_store_override):
new_library = store.create_library(org, library, user_id, fields=kwargs)
return new_library
class ItemFactory(XModuleFactory):
"""
Factory for XModule items.
"""
category = 'chapter'
parent = None
@lazy_attribute_sequence
def display_name(self, n):
return "{} {}".format(self.category, n)
@lazy_attribute
def location(self):
if self.display_name is None:
dest_name = uuid4().hex
else:
dest_name = self.display_name.replace(" ", "_")
new_location = self.parent_location.course_key.make_usage_key(
self.category,
dest_name
)
return new_location
@lazy_attribute
def parent_location(self):
default_location = getattr(last_course, 'loc', None)
try:
parent = self.parent
# This error is raised if the caller hasn't provided either parent or parent_location
# In this case, we'll just return the default parent_location
except CyclicDefinitionError:
return default_location
if parent is None:
return default_location
return parent.location
@classmethod
def _create(cls, target_class, **kwargs):
"""
Uses ``**kwargs``:
:parent_location: (required): the location of the parent module
(e.g. the parent course or section)
:category: the category of the resulting item.
:data: (optional): the data for the item
(e.g. XML problem definition for a problem item)
:display_name: (optional): the display name of the item
:metadata: (optional): dictionary of metadata attributes
:boilerplate: (optional) the boilerplate for overriding field values
:publish_item: (optional) whether or not to publish the item (default is True)
:target_class: is ignored
"""
# All class attributes (from this class and base classes) are
# passed in via **kwargs. However, some of those aren't actual field values,
# so pop those off for use separately
# catch any old style users before they get into trouble
assert 'template' not in kwargs
parent_location = kwargs.pop('parent_location', None)
data = kwargs.pop('data', None)
category = kwargs.pop('category', None)
display_name = kwargs.pop('display_name', None)
metadata = kwargs.pop('metadata', {})
location = kwargs.pop('location')
user_id = kwargs.pop('user_id', ModuleStoreEnum.UserID.test)
publish_item = kwargs.pop('publish_item', True)
assert isinstance(location, UsageKey)
assert location != parent_location
store = kwargs.pop('modulestore')
# This code was based off that in cms/djangoapps/contentstore/views.py
parent = kwargs.pop('parent', None) or store.get_item(parent_location)
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
if 'boilerplate' in kwargs:
template_id = kwargs.pop('boilerplate')
clz = XBlock.load_class(category, select=prefer_xmodules)
template = clz.get_template(template_id)
assert template is not None
metadata.update(template.get('metadata', {}))
if not isinstance(data, basestring):
data.update(template.get('data'))
# replace the display name with an optional parameter passed in from the caller
if display_name is not None:
metadata['display_name'] = display_name
module = store.create_child(
user_id,
parent.location,
location.block_type,
block_id=location.block_id,
metadata=metadata,
definition_data=data,
runtime=parent.runtime,
fields=kwargs,
)
# VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so
# if we add one then we need to also add it to the policy information (i.e. metadata)
# we should remove this once we can break this reference from the course to static tabs
if category == 'static_tab':
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:itemfactory_create_static_tab",
u"block:{}".format(location.block_type),
)
)
course = store.get_course(location.course_key)
course.tabs.append(
CourseTab.load('static_tab', name='Static Tab', url_slug=location.name)
)
store.update_item(course, user_id)
# parent and publish the item, so it can be accessed
if 'detached' not in module._class_tags:
parent.children.append(location)
store.update_item(parent, user_id)
if publish_item:
published_parent = store.publish(parent.location, user_id)
# module is last child of parent
return published_parent.get_children()[-1]
else:
return store.get_item(location)
elif publish_item:
return store.publish(location, user_id)
else:
return module
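# Typical test usage (sketch; names are illustrative): attach a chapter to a course
# created by CourseFactory above, then a problem under that chapter.
#
#     chapter = ItemFactory.create(parent_location=course.location,
#                                  category='chapter',
#                                  display_name='Week 1')
#     problem = ItemFactory.create(parent_location=chapter.location,
#                                  category='problem',
#                                  data='<problem>...</problem>')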
@contextmanager
def check_exact_number_of_calls(object_with_method, method_name, num_calls):
"""
Instruments the given method on the given object to verify the number of calls to the
method is exactly equal to 'num_calls'.
"""
with check_number_of_calls(object_with_method, method_name, num_calls, num_calls):
yield
def check_number_of_calls(object_with_method, method_name, maximum_calls, minimum_calls=1):
"""
Instruments the given method on the given object to verify the number of calls to the method is
less than or equal to the expected maximum_calls and greater than or equal to the expected minimum_calls.
"""
return check_sum_of_calls(object_with_method, [method_name], maximum_calls, minimum_calls)
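# Hedged usage sketch: both helpers are used as context managers around the code
# under test; the object and method below are placeholders, not real dependencies.
#
#     with check_exact_number_of_calls(some_store, 'get_item', 2):
#         code_under_test()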
class StackTraceCounter(object):
"""
A class that counts unique stack traces underneath a particular stack frame.
"""
def __init__(self, stack_depth, include_arguments=True):
"""
Arguments:
stack_depth (int): The number of stack frames above this constructor to capture.
include_arguments (bool): Whether to store the arguments that are passed
when capturing a stack trace.
"""
self.include_arguments = include_arguments
self._top_of_stack = traceback.extract_stack(limit=stack_depth)[0]
if self.include_arguments:
self._stacks = defaultdict(lambda: defaultdict(int))
else:
self._stacks = defaultdict(int)
def capture_stack(self, args, kwargs):
"""
Record the stack frames starting at the caller of this method, and
ending at the top of the stack as defined by the ``stack_depth``.
Arguments:
args: The positional arguments to capture at this stack frame
kwargs: The keyword arguments to capture at this stack frame
"""
# pylint: disable=broad-except
stack = traceback.extract_stack()[:-2]
if self._top_of_stack in stack:
stack = stack[stack.index(self._top_of_stack):]
if self.include_arguments:
safe_args = []
for arg in args:
try:
safe_args.append(repr(arg))
except Exception as exc:
safe_args.append('<un-repr-able value: {}'.format(exc))
safe_kwargs = {}
for key, kwarg in kwargs.items():
try:
safe_kwargs[key] = repr(kwarg)
except Exception as exc:
safe_kwargs[key] = '<un-repr-able value: {}'.format(exc)
self._stacks[tuple(stack)][tuple(safe_args), tuple(safe_kwargs.items())] += 1
else:
self._stacks[tuple(stack)] += 1
@property
def total_calls(self):
"""
Return the total number of stacks recorded.
"""
return sum(self.stack_calls(stack) for stack in self._stacks)
def stack_calls(self, stack):
"""
Return the number of calls to the supplied ``stack``.
"""
if self.include_arguments:
return sum(self._stacks[stack].values())
else:
return self._stacks[stack]
def __iter__(self):
"""
Iterate over all unique captured stacks.
"""
return iter(sorted(self._stacks.keys(), key=lambda stack: (self.stack_calls(stack), stack), reverse=True))
def __getitem__(self, stack):
"""
Return the set of captured calls with the supplied stack.
"""
return self._stacks[stack]
@classmethod
def capture_call(cls, func, stack_depth, include_arguments=True):
"""
A decorator that wraps ``func``, and captures each call to ``func``,
recording the stack trace, and optionally the arguments that the function
is called with.
Arguments:
func: the function to wrap
stack_depth: how far up the stack to truncate the stored stack traces (
this is counted from the call to ``capture_call``, rather than calls
to the captured function).
"""
stacks = StackTraceCounter(stack_depth, include_arguments)
# pylint: disable=missing-docstring
@functools.wraps(func)
def capture(*args, **kwargs):
stacks.capture_stack(args, kwargs)
return func(*args, **kwargs)
capture.stack_counter = stacks
return capture
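# Sketch of how capture_call is used by check_sum_of_calls below: the wrapper behaves
# like the original callable but records each call's stack trace (and, optionally,
# its arguments).
#
#     counted = StackTraceCounter.capture_call(some_function, stack_depth=2)
#     counted(1, 2)                        # delegates to some_function(1, 2)
#     counted.stack_counter.total_calls    # -> 1
#
# 'some_function' is a placeholder for whatever callable is being instrumented.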
@contextmanager
def check_sum_of_calls(object_, methods, maximum_calls, minimum_calls=1, include_arguments=True):
"""
Instruments the given methods on the given object to verify that the total sum of calls made to the
    methods falls between minimum_calls and maximum_calls.
"""
mocks = {
method: StackTraceCounter.capture_call(
getattr(object_, method),
stack_depth=7,
include_arguments=include_arguments
)
for method in methods
}
with patch.multiple(object_, **mocks):
yield
call_count = sum(capture_fn.stack_counter.total_calls for capture_fn in mocks.values())
# Assertion errors don't handle multi-line values, so pretty-print to std-out instead
if not minimum_calls <= call_count <= maximum_calls:
messages = ["Expected between {} and {} calls, {} were made.\n\n".format(
minimum_calls,
maximum_calls,
call_count,
)]
for method_name, capture_fn in mocks.items():
stack_counter = capture_fn.stack_counter
messages.append("{!r} was called {} times:\n".format(
method_name,
stack_counter.total_calls
))
for stack in stack_counter:
messages.append(" called {} times:\n\n".format(stack_counter.stack_calls(stack)))
messages.append(" " + " ".join(traceback.format_list(stack)))
messages.append("\n\n")
if include_arguments:
for (args, kwargs), count in stack_counter[stack].items():
messages.append(" called {} times with:\n".format(count))
messages.append(" args: {}\n".format(args))
messages.append(" kwargs: {}\n\n".format(dict(kwargs)))
print "".join(messages)
# verify the counter actually worked by ensuring we have counted greater than (or equal to) the minimum calls
assert_greater_equal(call_count, minimum_calls)
# now verify the number of actual calls is less than (or equal to) the expected maximum
assert_less_equal(call_count, maximum_calls)
def mongo_uses_error_check(store):
"""
Does mongo use the error check as a separate message?
"""
if hasattr(store, 'mongo_wire_version'):
return store.mongo_wire_version() <= 1
if hasattr(store, 'modulestores'):
return any([mongo_uses_error_check(substore) for substore in store.modulestores])
return False
@contextmanager
def check_mongo_calls_range(max_finds=float("inf"), min_finds=0, max_sends=None, min_sends=None):
"""
Instruments the given store to count the number of calls to find (incl find_one) and the number
of calls to send_message which is for insert, update, and remove (if you provide num_sends). At the
end of the with statement, it compares the counts to the bounds provided in the arguments.
:param max_finds: the maximum number of find calls expected
:param min_finds: the minimum number of find calls expected
:param max_sends: If non-none, make sure number of send calls are <=max_sends
:param min_sends: If non-none, make sure number of send calls are >=min_sends
"""
with check_sum_of_calls(
pymongo.message,
['query', 'get_more'],
max_finds,
min_finds,
):
if max_sends is not None or min_sends is not None:
with check_sum_of_calls(
pymongo.message,
# mongo < 2.6 uses insert, update, delete and _do_batched_insert. >= 2.6 _do_batched_write
['insert', 'update', 'delete', '_do_batched_write_command', '_do_batched_insert', ],
max_sends if max_sends is not None else float("inf"),
min_sends if min_sends is not None else 0,
):
yield
else:
yield
@contextmanager
def check_mongo_calls(num_finds=0, num_sends=None):
"""
Instruments the given store to count the number of calls to find (incl find_one) and the number
of calls to send_message which is for insert, update, and remove (if you provide num_sends). At the
end of the with statement, it compares the counts to the num_finds and num_sends.
:param num_finds: the exact number of find calls expected
:param num_sends: If none, don't instrument the send calls. If non-none, count and compare to
the given int value.
"""
with check_mongo_calls_range(num_finds, num_finds, num_sends, num_sends):
yield
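# Hedged example of the intended usage (the store calls shown are illustrative):
#
#     with check_mongo_calls(num_finds=3, num_sends=1):
#         modulestore().get_course(course_key)
#         modulestore().update_item(block, user_id)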
# This dict represents the attribute keys for a course's 'about' info.
# Note: The 'video' attribute is intentionally excluded as it must be
# handled separately; its value maps to an alternate key name.
# Reference : cms/djangoapps/models/settings/course_details.py
ABOUT_ATTRIBUTES = {
'effort': "Testing effort",
}
class CourseAboutFactory(XModuleFactory):
"""
Factory for XModule course about.
"""
@classmethod
def _create(cls, target_class, **kwargs): # pylint: disable=unused-argument
"""
Uses **kwargs:
        effort: effort information
video : video link
"""
user_id = kwargs.pop('user_id', None)
course_id, course_runtime = kwargs.pop("course_id"), kwargs.pop("course_runtime")
store = kwargs.pop('modulestore')
for about_key in ABOUT_ATTRIBUTES:
about_item = store.create_xblock(course_runtime, course_id, 'about', about_key)
about_item.data = ABOUT_ATTRIBUTES[about_key]
store.update_item(about_item, user_id, allow_not_found=True)
about_item = store.create_xblock(course_runtime, course_id, 'about', 'video')
about_item.data = "www.youtube.com/embed/testing-video-link"
store.update_item(about_item, user_id, allow_not_found=True)
|
agpl-3.0
|
wbthomason/minigrade
|
minigrade.py
|
1
|
17857
|
from flask import Flask, render_template, request, jsonify, Response, abort, session, stream_with_context, redirect, g
from ast import literal_eval
import subprocess
import re
import requests
import json
import shutil
import time
import os
import sqlite3
import logging
import sys
import commands
import threading
minigrade = Flask(__name__)
PORT_NUMBER = 8000
# Put your own secret key here. You can't have mine!
minigrade.secret_key = <KEY>
urlmatch = re.compile('(?:git@|git://|https://)(?P<url>[\w@-]+\.[a-zA-Z]+[:/](?P<user>[a-zA-Z][a-zA-Z0-9-]+)/(?P<repo>.+))')
SERVER_IP = 'localhost'#'128.143.136.170'
logging.basicConfig(filename='grader.log',level=logging.DEBUG)
benchmark_mutex = threading.Lock()
def process_repo(repo):
logging.debug('Processing repo: ' + repo)
result = urlmatch.match(repo)
if not result:
return None
giturl = "https://" + result.group('url')
repository = result.group('repo')
if repository[-4:] == ".git":
repository = repository[:-4]
logging.debug('Returning: ' + str(repository))
return (giturl, repository, result.group('user'))
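# Example of what process_repo() accepts and returns (URL is illustrative only):
#     process_repo("https://github.com/someuser/somerepo.git")
#       -> ("https://github.com/someuser/somerepo.git", "somerepo", "someuser")
#     process_repo("not-a-repo")
#       -> None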
def sort_files_by_age(files):
filedata = [(filename, os.lstat(filename).st_ctime) for filename in files]
filedata = sorted(filedata, key = lambda x: x[1])
filedata = [filetuple[0] for filetuple in filedata]
filedata = filter(lambda x: not os.path.isdir(x), filedata)
return filedata
def cap_logs():
result_files = os.listdir('.')
if len(result_files) > 10:
filedata = sort_files_by_age(result_files)[:len(result_files) - 10]
for f in filedata:
os.remove(f)
def parse_httperf_output(output_str):
dur = -1
avg_resp = -1
io = -1
err = -1
for line in output_str.split('\n'):
# need test-duration(s), reply time(ms), Net I/O, errors
output_line = line.rstrip()
testduration = re.search(r'test-duration (\d+\.\d+) s', output_line)
replytime = re.search(r'Reply time \[ms\]: response (\d+\.\d+) .*', output_line)
netio = re.search(r'Net I/O: (\d+\.\d+) KB/s', output_line)
errorcount = re.search(r'Errors: total (\d+)', output_line)
if testduration:
#print "Test duration: %f s\n" % float(testduration.group(1))
dur = float(testduration.group(1))
elif replytime:
#print "Reply time: %f ms\n" % float(replytime.group(1))
avg_resp = float(replytime.group(1))
elif netio:
#print "Net I/O: %f MB\n" % float(netio.group(1)) * dur / 1024
io = float(netio.group(1)) * dur / 1024
elif errorcount:
#print "Error count: %d\n" % int(errorcount.group(1))
err = int(errorcount.group(1))
'''
print "Test duration: %f s" % dur
print "Reply time: %f ms" % avg_response
print "Net I/O: %f MB" % io
print "Error count: %d" % err
print "END HTTPERF\n"
'''
return dur, avg_resp, io, err
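# parse_httperf_output() scans for httperf summary lines shaped roughly like the
# following (illustrative values):
#     Total: ... test-duration 6.001 s
#     Reply time [ms]: response 12.3 transfer 0.0
#     Net I/O: 281.5 KB/s (2.3*10^6 bps)
#     Errors: total 0 client-timo 0 socket-timo 0 connrefused 0 connreset 0
# and would return (dur, avg_resp, io, err) == (6.001, 12.3, 281.5 * 6.001 / 1024, 0).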
def grade_stream(assignment, repo):
yield "retry: 300000\n"
if 'email' not in session:
yield "data: inv: Please log in before running the autograder.\n\n"
raise StopIteration
#session['email'] = "[email protected]"
build = None
tests = []
repo_name = "NotADirectory"
cwd = os.getcwd()
try:
with open("tests/{}.test".format(assignment)) as testfile:
for idnum, testcase in enumerate(testfile):
test = literal_eval(' '.join(testcase.split(' ')[1:]))
if testcase.split(' ')[0] == "build":
build = test
else:
tests.append(test)
yield "data: tn: {} {}\n\n".format(test['name'], idnum)
except:
print "No test file for '{}'".format(assignment)
yield "data: inv: Error: No valid test file for {}\n\n".format(assignment)
raise StopIteration
try:
yield "data inv: Grading {} from {}...\n\n".format(assignment, repo)
logging.debug("Grading " + assignment + " from: " + repo);
os.chdir("results/{}".format(assignment))
if not os.path.isdir(session['email']):
os.mkdir(session['email'])
os.chdir(session['email'])
cap_logs()
result_files = sort_files_by_age(os.listdir('.'))
result_files.reverse()
# review the past results
for f in result_files:
yield "data: nextpast\n\n"
with open(f) as result:
for line in result:
yield "data: past: {}\n\n".format(line)
# start cloning the repository
# just skip it in ps3
if assignment == "PS3":
# ps3 remote benchmark
httperf_req_list_file_path = os.path.join(cwd, "tests/zhtta-test-NUL.txt")
cmd = "httperf --server %s --port 4414 --rate 10 --num-conns 60 --wlog=y,%s" % (repo, httperf_req_list_file_path) # actually IP address
#cmd = "ping -c 2 %s" % repo
yield "data: raw: Queuing for benchmark, please wait...\n\n"
benchmark_mutex.acquire()
logging.debug("Benchmark starts, please wait...");
yield "data: raw: Benchmark starts, please wait...\n\n"
import commands
yield "data: raw: {}\n\n".format(cmd)
ret_text = commands.getoutput(cmd)
benchmark_mutex.release()
for line in ret_text.split('\n'):
yield "data: raw: {}\n\n".format(line)
(dur, avg_resp, io, err) = parse_httperf_output(ret_text)
with open(str(time.time())+".result", 'w') as results:
results.write("Duration: %d s\n\n" % (dur))
results.write("Average Response Time: %d ms\n\n" % avg_resp)
results.write("IO: %dMB\n\n" % (io))
results.write("Errors: {}\n".format(err))
if dur != 1 and io > 280 and err == 0:
yield "data: tr: Pass %d %ds\n\n" % (0, dur)
yield "data: tr: Pass %d %dms\n\n" % (1, avg_resp)
yield "data: tr: Pass %d %dMB\n\n" % (2, io)
yield "data: tr: Pass %d %d errors\n\n" % (3, err)
update_top_runs(session['email'], str(dur), str(avg_resp))
else:
yield "data: tr: Fail %d %ds\n\n" % (0, dur)
yield "data: tr: Fail %d %dms\n\n" % (1, avg_resp)
yield "data: tr: Fail %d %dMB\n\n" % (2, io)
yield "data: tr: Fail %d %d errors\n\n" % (3, err)
#os.chdir(cwd)
#yield "data: done\n\n"
else:
with open(str(time.time())+".result", 'w') as results:
result = process_repo(repo)
if not result:
results.write("{} is not a valid git repository.\n".format(repo))
yield "data: inv: {} is not a valid git repository.\n\n".format(repo)
raise StopIteration
logging.debug("Processed repo...");
repo_url, repo_name, repo_user = result
if os.path.isdir(repo_name):
shutil.rmtree(repo_name)
try:
logging.debug("Cloning...")
yield "data inv: Cloning github repository...\n\n"
git = subprocess.check_output("git clone {}".format(repo_url).split(" "), stderr = subprocess.STDOUT)
logging.debug("Finished cloning...")
yield "data: raw: {}\n\n".format(git)
except Exception as e:
logging.debug("{} is not a valid repository, because we got {}\n".format(repo,e))
results.write("{} is not a valid repository, because we got {}\n".format(repo,e))
yield "data: inv: Error: {} is not a valid repository, because we got {}\n\n".format(repo,e)
raise StopIteration
logging.debug("Using repo {}.\n".format(repo))
results.write("Using repository {}.\n".format(repo))
os.chdir(repo_name)
# copying files to testing dir...
#yield "setting up files..."
#shutil.copy("/home/grader/minigrade/tests/testfiles/abc.txt", "abc.txt")
if build:
logging.debug("Building...")
success = re.compile(build['results'])
commands = build['cmd'].split(";")
for command in commands:
yield "data: raw: {}\n\n".format(command)
result = None
try:
result = subprocess.check_output(command, shell = True, stderr = subprocess.STDOUT)
except:
print "Error building"
if result:
for line in result.split('\n'):
yield "data: raw: {}\n\n".format(line)
else:
yield "data: raw: Error running {}\n\n".format(command)
if result and re.search(success, result):
results.write("Build success\n")
yield "data: tr: Pass 0\n\n"
else:
results.write("Build failed\n")
yield "data: tr: Fail 0\n\n"
yield "data: inv: Build failed!\n\n"
raise StopIteration
passed = 0
failed = 0
counter = 0
for idnum, test in enumerate(tests):
counter += 1
yield "data: raw: {}\n\n".format(test["cmd"])
success = re.compile(test['results'])
f = open("test_file{}".format(counter), 'w')
temp=""
for token in test['cmd'].split(';'):
temp = temp + './gash -c "{}"\n'.format(token)
print "{}: temp={}".format(counter, temp.rstrip())
f.write(temp.rstrip())
f.close()
cwd = os.getcwd()
print "cwd={}".format(cwd)
for dep in test['dep']:
print "dep={}".format(dep)
print "typeof(dep)={}".format(type(dep))
shutil.copy("/home/grader/minigrade/tests/testfiles/{}".format(dep), dep)
command = "/home/grader/minigrade/dockerscript.sh {} {} test_file{} output_file{}".format(cwd, cwd, counter, counter)
print "{}: command={}".format(counter, command)
returncode = subprocess.call(command, shell = True, stderr = subprocess.STDOUT)
os.chdir(cwd)
result =""
try:
r = open('{}/output_file{}'.format(cwd,counter), 'r')
result = ''.join(r.readlines()).rstrip()
r.close()
except:
print "{}: couldn't open output_file{}".format(counter, counter)
result="null"
print "{}: test {}".format(session['email'], counter)
print "returncode={}".format(returncode)
# only print the first 10 lines to prevent spamming
m = 0
for line in result.split('\n'):
if m < 10:
print "result from output_file{}={}".format(counter, line)
yield "data: raw: {}\n\n".format(line)
else:
break
m += 1
print "{}: done printing result".format(counter)
if m >= 10:
yield "data: raw: ...\n\n"
if (returncode == 0) and re.match(success, result):
results.write("Passed {}\n".format(test['name']))
passed += 1
yield "data: tr: Pass {}\n\n".format(idnum + 1)
else:
results.write("Failed {}\n".format(test['name']))
failed += 1
yield "data: tr: Fail {}\n\n".format(idnum + 1)
results.write("Total pass: {}\n".format(passed))
results.write("Total fail: {}\n".format(failed))
finally:
if os.path.isdir(repo_name):
shutil.rmtree(repo_name)
os.chdir(cwd)
yield "data: done\n\n"
@minigrade.route('/')
def index():
with open("grade.html") as sub_page:
return '\n'.join(sub_page.readlines())
@minigrade.route('/grade/')
def grade():
assignment = request.args.get("assign", "NoneSuch")
repo = request.args.get("repo", "NoneSuch")
logging.debug("Grading " + assignment + ": " + repo)
response = Response(stream_with_context(grade_stream(assignment, repo)), mimetype="text/event-stream")
logging.debug("Finished grading " + repo + ": " + str(response))
return response
@minigrade.route('/auth/login', methods=['POST', 'GET'])
def login():
if request.method == "GET":
return session['email'] if 'email' in session else "null"
# The request has to have an assertion for us to verify
if 'assertion' not in request.form:
abort(400)
# Send the assertion to Mozilla's verifier service.
data = {'assertion': request.form['assertion'], 'audience': 'http://' + SERVER_IP + ':'+ str(PORT_NUMBER)}
resp = requests.post('https://verifier.login.persona.org/verify', data=data, verify=True)
# Did the verifier respond?
if resp.ok:
# Parse the response
verification_data = json.loads(resp.content)
# Check if the assertion was valid
if verification_data['status'] == 'okay':
# Log the user in by setting a secure session cookie
session.update({'email': verification_data['email']})
logging.debug('Login as: ' + verification_data['email'])
return "Logged in as %s" % verification_data['email']
logging.debug('Login failure: ' + str(resp))
# Oops, something failed. Abort.
abort(500)
@minigrade.route('/auth/logout', methods=['POST'])
def logout():
session.pop('email', None)
return redirect('/')
# Server-side database methods
##########
database_path = <PATH>
@minigrade.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
if error:
print("There was an error closing the database: {}".format(error))
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(database_path)
rv.row_factory = sqlite3.Row
return rv
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
def init_db():
"""Creates the database tables."""
with minigrade.app_context():
db = get_db()
with minigrade.open_resource('schema.sql') as f:
db.cursor().executescript(f.read())
db.commit()
def query_db(query, args=(), one=False):
"""Returns a query to the database as a list"""
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
get_db().commit()
return (rv[0] if rv else None) if one else rv
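# Illustrative usage only (a hypothetical snippet mirroring update_top_runs below):
# query_db() executes one statement, commits, and returns all rows, or only the
# first row (or None) when one=True.
#
#   row = query_db("SELECT * FROM topruns WHERE username=?", ["alice"], one=True)
#   if row is None:
#       query_db("INSERT INTO topruns VALUES (?, ?, ?)", ["alice", "12", "30"])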
############
# Leaderboard functions
#################
leaderboard_path = <PATH>
import random
@minigrade.route('/leaderboard.html')
def leaderboard():
with open("leaderboard.html") as sub_page:
return '\n'.join(sub_page.readlines())
@minigrade.route('/leaders.data')
def leaders():
with open("leaders.data") as sub_page:
return '\n'.join(sub_page.readlines())
def update_top_runs(user, duration, response):
''' Run this to update the top runs with a (user, duration, response time) entry'''
q = query_db("SELECT * FROM topruns WHERE username=?", [user], one=True)
if q is None:
query_db("INSERT INTO topruns VALUES (?, ?, ?)", [user, str(duration), str(response)])
else:
query_db("UPDATE topruns SET duration=?, response=? WHERE username=?", [str(duration), str(response), user])
# THIS LINE determines how many users are shown on the leaderboard.
update_leaderboard(5)
def get_top_runs(num):
''' Returns the top num runs as three lists of num entries each:
the first ranked by the combined duration/response heuristic,
the second by best duration, the third by best response time'''
runs = query_db("SELECT * FROM topruns")
data = [[],[],[]]
runs.sort(key=heuristic)
data[0] = runs[:num]
runs.sort(key=lambda x: float(x[1]))
data[1] = runs[:num]
runs.sort(key=lambda x: float(x[2]))
data[2] = runs[:num]
return data
def heuristic(run):
'''returns a weighting between duration and response time'''
tot_duration = float(run[1])
avg_response = float(run[2])
return tot_duration * avg_response
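# A minimal illustration (hypothetical helper, not part of the original app): the
# heuristic above multiplies total duration by average response time, so smaller
# products rank first when it is used as the sort key in get_top_runs().
def _example_heuristic_ranking():
    # Rows mirror the topruns schema: (username, duration, response time).
    sample = [("alice", "12", "30"), ("bob", "10", "50")]
    # heuristic() gives 360.0 for alice and 500.0 for bob, so alice ranks first.
    return sorted(sample, key=heuristic)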
def update_leaderboard(num):
'''Updates the leaderboard with 'num' entries for webpages to see'''
head = "<h2>Leaderboard</h2>"
tbl_template=lambda x: '''
<h3>%s</h3>
<table id="leaderboard-dr" style='width:100%%%%;border-spacing:10px'>
<tr><th style="text-align:left">ID</th>
<th style="text-align:left">Duration Time</th>
<th style="text-align:left">Response Time</th>
</tr>
%%s
</table>
'''%x
titles = ["Best duration/response time", "Best duration", "Best Response Time"]
data = get_top_runs(num)
fin = ""
for i, title in enumerate(titles):
tmp = tbl_template(title)
row = ""
for tup in data[i]:
# should be (username, duration, response time)
row += "<tr><td>{}</td><td>{}</td><td>{}</td></tr>".format(*tup)
fin += tmp % row
open(leaderboard_path, 'w').write(fin)
#Only run in chroot jail.
if __name__ == '__main__':
print "running..."
minigrade.run(host='0.0.0.0', debug=False, threaded=True, port=PORT_NUMBER)
#minigrade.run(debug=True, threaded=True, port=9080)
|
mit
|
GehenHe/Recognize-Face-on-Android
|
tensorflow/python/training/sync_replicas_optimizer_test.py
|
13
|
11344
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sync_replicas_optimizer.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import portpicker
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import server_lib
from tensorflow.python.training import training
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return them."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)
]
return workers, ps_servers
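# Minimal usage sketch (mirrors test2Workers below): pick unused ports, start the
# in-process gRPC servers, then hand the worker targets to the MonitoredTrainingSession
# instances created in get_workers().
#
#   workers, ps_servers = create_local_cluster(num_workers=2, num_ps=2)
#   sessions, graphs, train_ops = get_workers(2, replicas_to_aggregate=2,
#                                             workers=workers)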
# Creates the workers and returns their sessions, graphs, train_ops.
def get_workers(num_workers, replicas_to_aggregate, workers):
sessions = []
graphs = []
train_ops = []
for worker_id in range(num_workers):
graph = ops.Graph()
is_chief = (worker_id == 0)
with graph.as_default():
with ops.device("/job:ps/task:0"):
global_step = variables.Variable(0, name="global_step", trainable=False)
var_0 = variables.Variable(0.0, name="v0")
with ops.device("/job:ps/task:1"):
var_1 = variables.Variable(1.0, name="v1")
var_sparse = variables.Variable([[3.0], [4.0]], name="v_sparse")
with ops.device("/job:worker/task:" + str(worker_id)):
grads_0 = constant_op.constant(0.1 + worker_id * 0.2)
grads_1 = constant_op.constant(0.9 + worker_id * 0.2)
# This is to test against sparse gradients.
grads_sparse = ops.IndexedSlices(
constant_op.constant(
[0.1 + worker_id * 0.2], shape=[1, 1]),
constant_op.constant([1]),
constant_op.constant([2, 1]))
sgd_opt = gradient_descent.GradientDescentOptimizer(2.0)
sync_rep_opt = training.SyncReplicasOptimizer(
sgd_opt,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers)
train_op = [
sync_rep_opt.apply_gradients(
zip([grads_0, grads_1, grads_sparse],
[var_0, var_1, var_sparse]),
global_step=global_step)
]
sync_replicas_hook = sync_rep_opt.make_session_run_hook(
is_chief, num_tokens=num_workers)
# Creates MonitoredSession
session = training.MonitoredTrainingSession(
master=workers[worker_id].target,
is_chief=is_chief,
hooks=[sync_replicas_hook])
sessions.append(session)
graphs.append(graph)
train_ops.append(train_op)
return sessions, graphs, train_ops
class SyncReplicasOptimizerTest(test.TestCase):
def _run(self, train_op, sess):
sess.run(train_op)
def test2Workers(self):
num_workers = 2
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate, workers)
# Chief should have already initialized all the variables.
var_0_g_0 = graphs[0].get_tensor_by_name("v0:0")
var_1_g_0 = graphs[0].get_tensor_by_name("v1:0")
local_step_0 = graphs[0].get_tensor_by_name("sync_rep_local_step:0")
self.assertAllEqual(0.0, sessions[0].run(var_0_g_0))
self.assertAllEqual(1.0, sessions[0].run(var_1_g_0))
self.assertAllEqual(0, sessions[0].run(local_step_0))
# Will just use session 1 to verify all the variables later.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
var_sparse_g_1 = graphs[1].get_tensor_by_name("v_sparse:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
# The steps should also be initialized.
self.assertAllEqual(0, sessions[1].run(global_step))
self.assertAllEqual(0, sessions[1].run(local_step_1))
self.assertAllClose([[3.0], [4.0]], sessions[1].run(var_sparse_g_1))
# We have initial tokens in the queue so we can call this one by one. After
# the first step, this will no longer work as there will be no more extra
# tokens in the queue.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
# The global step should have been updated and the variables should now have
# the new values after the average of the gradients are applied.
while sessions[1].run(global_step) != 1:
time.sleep(0.01)
self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
self.assertAllClose([[3.0], [4.0 - (0.1 + 0.3) / 2 * 2.0]],
sessions[1].run(var_sparse_g_1))
# The local step for both workers should still be 0 because the initial
# tokens in the token queue are 0s. This means that the following
# computation of the gradients will be wasted as local_step is smaller than
# the current global step. However, this only happens once when the system
# just starts and this is necessary to make the system robust for the case
# when chief gets restarted by errors/preemption/...
self.assertAllEqual(0, sessions[0].run(local_step_0))
self.assertAllEqual(0, sessions[1].run(local_step_1))
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. The variables are still the same.
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllEqual(1, sessions[0].run(local_step_0))
self.assertAllEqual(1, sessions[1].run(local_step_1))
self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
# At this step, the token queue is empty. So the 2 workers need to work
# together to proceed.
threads = []
threads.append(
self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0])))
threads.append(
self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1])))
# The two workers start to execute the train op.
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# The global step should now be 2 and the gradients should have been
# applied twice.
self.assertAllEqual(2, sessions[1].run(global_step))
self.assertAllClose(0 - 2 * (0.1 + 0.3) / 2 * 2.0,
sessions[1].run(var_0_g_1))
self.assertAllClose(1 - 2 * (0.9 + 1.1) / 2 * 2.0,
sessions[1].run(var_1_g_1))
# 3 workers and one of them is backup.
def test3Workers1Backup(self):
num_workers = 3
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate, workers)
# Chief should have already initialized all the variables.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
# The steps should also be initialized.
self.assertAllEqual(0, sessions[1].run(global_step))
self.assertAllEqual(0, sessions[1].run(local_step_1))
# We have initial tokens in the queue so we can call this one by one. After
# the token queue becomes empty, they should be called concurrently.
# Here worker 0 and worker 2 finished first.
sessions[0].run(train_ops[0])
sessions[2].run(train_ops[2])
# The global step should have been updated since we only need to collect 2
# gradients. The variables should now have the new values after the average
# of the gradients from worker 0/2 are applied.
while sessions[1].run(global_step) != 1:
time.sleep(0.01)
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllClose(0 - (0.1 + 0.5) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.3) / 2 * 2.0, sessions[1].run(var_1_g_1))
# Worker 1 finished later and its gradients will now be dropped as it is
# stale.
sessions[1].run(train_ops[1])
# As shown in the previous test, the local_step for all workers should be
# still 0 so their next computation will also be dropped.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
sessions[2].run(train_ops[2])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. Just check worker 1 as an example.
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllEqual(1, sessions[1].run(local_step_1))
thread_0 = self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1]))
# Lets worker 0 execute first.
# It will wait as we need 2 workers to finish this step and the global step
# should be still 1.
thread_0.start()
self.assertAllEqual(1, sessions[1].run(global_step))
# Starts worker 1.
thread_1.start()
thread_1.join()
# The global step should now be 2 and the gradients should have been
# applied again.
self.assertAllEqual(2, sessions[1].run(global_step))
self.assertAllClose(-0.6 - (0.1 + 0.3) / 2 * 2.0,
sessions[1].run(var_0_g_1))
self.assertAllClose(-1.2 - (0.9 + 1.1) / 2 * 2.0,
sessions[1].run(var_1_g_1))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
hefen1/chromium
|
tools/perf/benchmarks/benchmark_smoke_unittest.py
|
15
|
4441
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run the first page of one benchmark for every module.
Only benchmarks that have a composable measurement are included.
Ideally this test would be comprehensive, however, running one page
of every benchmark would take impractically long.
"""
import os
import unittest
from telemetry import benchmark as benchmark_module
from telemetry.core import discover
from telemetry.page import page_test
from telemetry.unittest_util import options_for_unittests
from telemetry.unittest_util import progress_reporter
def SmokeTestGenerator(benchmark):
# NOTE TO SHERIFFS: DO NOT DISABLE THIS TEST.
#
# This smoke test dynamically tests all benchmarks. So disabling it for one
# failing or flaky benchmark would disable a much wider swath of coverage
# than is usually intended. Instead, if a particular benchmark is failing,
# disable it in tools/perf/benchmarks/*.
@benchmark_module.Disabled('chromeos') # crbug.com/351114
def BenchmarkSmokeTest(self):
# Only measure a single page so that this test cycles reasonably quickly.
benchmark.options['pageset_repeat'] = 1
benchmark.options['page_repeat'] = 1
class SinglePageBenchmark(benchmark): # pylint: disable=W0232
def CreatePageSet(self, options):
# pylint: disable=E1002
ps = super(SinglePageBenchmark, self).CreatePageSet(options)
for p in ps.pages:
p.skip_waits = True
ps.user_stories = [p]
break
return ps
# Set the benchmark's default arguments.
options = options_for_unittests.GetCopy()
options.output_format = 'none'
options.suppress_gtest_report = True
parser = options.CreateParser()
benchmark.AddCommandLineArgs(parser)
benchmark_module.AddCommandLineArgs(parser)
benchmark.SetArgumentDefaults(parser)
options.MergeDefaultValues(parser.get_default_values())
benchmark.ProcessCommandLineArgs(None, options)
benchmark_module.ProcessCommandLineArgs(None, options)
self.assertEqual(0, SinglePageBenchmark().Run(options),
msg='Failed: %s' % benchmark)
return BenchmarkSmokeTest
def load_tests(_, _2, _3):
suite = progress_reporter.TestSuite()
benchmarks_dir = os.path.dirname(__file__)
top_level_dir = os.path.dirname(benchmarks_dir)
measurements_dir = os.path.join(top_level_dir, 'measurements')
all_measurements = discover.DiscoverClasses(
measurements_dir, top_level_dir, page_test.PageTest).values()
# Using the default of |index_by_class_name=False| means that if a module
# has multiple benchmarks, only the last one is returned.
all_benchmarks = discover.DiscoverClasses(
benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
index_by_class_name=False).values()
for benchmark in all_benchmarks:
if hasattr(benchmark, 'test') and benchmark.test not in all_measurements:
# If the benchmark does not have a measurement, then it is not composable.
# Ideally we'd like to test these as well, but the non-composable
# benchmarks are usually long-running benchmarks.
continue
# TODO(tonyg): Smoke doesn't work with session_restore yet.
if (benchmark.Name().startswith('session_restore') or
benchmark.Name().startswith('skpicture_printer')):
continue
if hasattr(benchmark, 'generated_profile_archive'):
# We'd like to test these, but don't know how yet.
continue
class BenchmarkSmokeTest(unittest.TestCase):
pass
method = SmokeTestGenerator(benchmark)
# Make sure any decorators are propagated from the original declaration.
# (access to protected members) pylint: disable=W0212
# TODO(dpranke): Since we only pick the first test from every class
# (above), if that test is disabled, we'll end up not running *any*
# test from the class. We should probably discover all of the tests
# in a class, and then throw the ones we don't need away instead.
if hasattr(benchmark, '_enabled_strings'):
method._enabled_strings = benchmark._enabled_strings
if hasattr(benchmark, '_disabled_strings'):
method._disabled_strings = benchmark._disabled_strings
setattr(BenchmarkSmokeTest, benchmark.Name(), method)
suite.addTest(BenchmarkSmokeTest(benchmark.Name()))
return suite
|
bsd-3-clause
|
chromium/web-page-replay
|
third_party/dns/rdtypes/keybase.py
|
248
|
5017
|
# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.dnssec
import dns.rdata
_flags_from_text = {
'NOCONF': (0x4000, 0xC000),
'NOAUTH': (0x8000, 0xC000),
'NOKEY': (0xC000, 0xC000),
'FLAG2': (0x2000, 0x2000),
'EXTEND': (0x1000, 0x1000),
'FLAG4': (0x0800, 0x0800),
'FLAG5': (0x0400, 0x0400),
'USER': (0x0000, 0x0300),
'ZONE': (0x0100, 0x0300),
'HOST': (0x0200, 0x0300),
'NTYP3': (0x0300, 0x0300),
'FLAG8': (0x0080, 0x0080),
'FLAG9': (0x0040, 0x0040),
'FLAG10': (0x0020, 0x0020),
'FLAG11': (0x0010, 0x0010),
'SIG0': (0x0000, 0x000f),
'SIG1': (0x0001, 0x000f),
'SIG2': (0x0002, 0x000f),
'SIG3': (0x0003, 0x000f),
'SIG4': (0x0004, 0x000f),
'SIG5': (0x0005, 0x000f),
'SIG6': (0x0006, 0x000f),
'SIG7': (0x0007, 0x000f),
'SIG8': (0x0008, 0x000f),
'SIG9': (0x0009, 0x000f),
'SIG10': (0x000a, 0x000f),
'SIG11': (0x000b, 0x000f),
'SIG12': (0x000c, 0x000f),
'SIG13': (0x000d, 0x000f),
'SIG14': (0x000e, 0x000f),
'SIG15': (0x000f, 0x000f),
}
_protocol_from_text = {
'NONE' : 0,
'TLS' : 1,
'EMAIL' : 2,
'DNSSEC' : 3,
'IPSEC' : 4,
'ALL' : 255,
}
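# A small illustrative helper (an assumption, not part of dnspython): shows how
# KEYBase.from_text() below combines the (value, mask) pairs when the flags field
# is written as a '|'-separated mnemonic string instead of a plain integer.
def _example_flags_from_mnemonics(text):
    flags = 0
    for flag in text.split('|'):
        value, mask = _flags_from_text[flag]
        flags &= ~mask  # clear the bits covered by this flag's mask
        flags |= value  # then set the flag's value bits
    return flags
# _example_flags_from_mnemonics('HOST|NOCONF') == 0x4200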
class KEYBase(dns.rdata.Rdata):
"""KEY-like record base
@ivar flags: the key flags
@type flags: int
@ivar protocol: the protocol for which this key may be used
@type protocol: int
@ivar algorithm: the algorithm used for the key
@type algorithm: int
@ivar key: the public key
@type key: string"""
__slots__ = ['flags', 'protocol', 'algorithm', 'key']
def __init__(self, rdclass, rdtype, flags, protocol, algorithm, key):
super(KEYBase, self).__init__(rdclass, rdtype)
self.flags = flags
self.protocol = protocol
self.algorithm = algorithm
self.key = key
def to_text(self, origin=None, relativize=True, **kw):
return '%d %d %d %s' % (self.flags, self.protocol, self.algorithm,
dns.rdata._base64ify(self.key))
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
flags = tok.get_string()
if flags.isdigit():
flags = int(flags)
else:
flag_names = flags.split('|')
flags = 0
for flag in flag_names:
v = _flags_from_text.get(flag)
if v is None:
raise dns.exception.SyntaxError('unknown flag %s' % flag)
flags &= ~v[1]
flags |= v[0]
protocol = tok.get_string()
if protocol.isdigit():
protocol = int(protocol)
else:
protocol = _protocol_from_text.get(protocol)
if protocol is None:
raise dns.exception.SyntaxError('unknown protocol %s' % protocol)
algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value)
b64 = ''.join(chunks)
key = b64.decode('base64_codec')
return cls(rdclass, rdtype, flags, protocol, algorithm, key)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
header = struct.pack("!HBB", self.flags, self.protocol, self.algorithm)
file.write(header)
file.write(self.key)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
if rdlen < 4:
raise dns.exception.FormError
header = struct.unpack('!HBB', wire[current : current + 4])
current += 4
rdlen -= 4
key = wire[current : current + rdlen]
return cls(rdclass, rdtype, header[0], header[1], header[2],
key)
from_wire = classmethod(from_wire)
def _cmp(self, other):
hs = struct.pack("!HBB", self.flags, self.protocol, self.algorithm)
ho = struct.pack("!HBB", other.flags, other.protocol, other.algorithm)
v = cmp(hs, ho)
if v == 0:
v = cmp(self.key, other.key)
return v
|
apache-2.0
|
nwjs/chromium.src
|
chrome/browser/vr/PRESUBMIT.py
|
3
|
1328
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting chrome/browser/vr
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import re
# chrome/PRESUBMIT.py blocks several linters due to the infeasibility of
# enforcing them on a large codebase. Here we'll start by enforcing all
# linters, and add exclusions if necessary.
#
# Note that this list must be non-empty, or cpplint will use its default set of
# filters.
LINT_FILTERS = [
'-build/include',
]
VERBOSITY_LEVEL = 4
INCLUDE_CPP_FILES_ONLY = (r'.*\.(cc|h)$',)
def _CheckChangeLintsClean(input_api, output_api):
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY)
return input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources, LINT_FILTERS, VERBOSITY_LEVEL)
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CheckChangeLintsClean(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CheckChangeLintsClean(input_api, output_api))
return results
|
bsd-3-clause
|
zalando/patroni
|
patroni/api.py
|
1
|
40300
|
import base64
import hmac
import json
import logging
import psycopg2
import time
import traceback
import dateutil.parser
import datetime
import os
import six
import socket
import sys
from ipaddress import ip_address, ip_network as _ip_network
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from six.moves.socketserver import ThreadingMixIn
from six.moves.urllib_parse import urlparse, parse_qs
from threading import Thread
from .exceptions import PostgresConnectionException, PostgresException
from .postgresql.misc import postgres_version_to_int
from .utils import deep_compare, enable_keepalive, parse_bool, patch_config, Retry, \
RetryFailedError, parse_int, split_host_port, tzutc, uri, cluster_as_json
logger = logging.getLogger(__name__)
def ip_network(value):
return _ip_network(value.decode('utf-8') if six.PY2 else value, False)
class RestApiHandler(BaseHTTPRequestHandler):
def _write_status_code_only(self, status_code):
message = self.responses[status_code][0]
self.wfile.write('{0} {1} {2}\r\n\r\n'.format(self.protocol_version, status_code, message).encode('utf-8'))
self.log_request(status_code)
def _write_response(self, status_code, body, content_type='text/html', headers=None):
self.send_response(status_code)
headers = headers or {}
if content_type:
headers['Content-Type'] = content_type
for name, value in headers.items():
self.send_header(name, value)
for name, value in self.server.http_extra_headers.items():
self.send_header(name, value)
self.end_headers()
self.wfile.write(body.encode('utf-8'))
def _write_json_response(self, status_code, response):
self._write_response(status_code, json.dumps(response, default=str), content_type='application/json')
def check_access(func):
"""Decorator function to check the source ip, authorization header. or client certificates
Usage example:
@check_access
def do_PUT_foo():
pass
"""
def wrapper(self, *args, **kwargs):
if self.server.check_access(self):
return func(self, *args, **kwargs)
return wrapper
def _write_status_response(self, status_code, response):
patroni = self.server.patroni
tags = patroni.ha.get_effective_tags()
if tags:
response['tags'] = tags
if patroni.postgresql.sysid:
response['database_system_identifier'] = patroni.postgresql.sysid
if patroni.postgresql.pending_restart:
response['pending_restart'] = True
response['patroni'] = {'version': patroni.version, 'scope': patroni.postgresql.scope}
if patroni.scheduled_restart and isinstance(patroni.scheduled_restart, dict):
response['scheduled_restart'] = patroni.scheduled_restart.copy()
del response['scheduled_restart']['postmaster_start_time']
response['scheduled_restart']['schedule'] = (response['scheduled_restart']['schedule']).isoformat()
if not patroni.ha.watchdog.is_healthy:
response['watchdog_failed'] = True
if patroni.ha.is_paused():
response['pause'] = True
qsize = patroni.logger.queue_size
if qsize > patroni.logger.NORMAL_LOG_QUEUE_SIZE:
response['logger_queue_size'] = qsize
lost = patroni.logger.records_lost
if lost:
response['logger_records_lost'] = lost
self._write_json_response(status_code, response)
def do_GET(self, write_status_code_only=False):
"""Default method for processing all GET requests which can not be routed to other methods"""
path = '/master' if self.path == '/' else self.path
response = self.get_postgresql_status()
patroni = self.server.patroni
cluster = patroni.dcs.cluster
leader_optime = cluster and cluster.last_lsn or 0
replayed_location = response.get('xlog', {}).get('replayed_location', 0)
max_replica_lag = parse_int(self.path_query.get('lag', [sys.maxsize])[0], 'B')
if max_replica_lag is None:
max_replica_lag = sys.maxsize
is_lagging = leader_optime and leader_optime > replayed_location + max_replica_lag
replica_status_code = 200 if not patroni.noloadbalance and not is_lagging and \
response.get('role') == 'replica' and response.get('state') == 'running' else 503
if not cluster and patroni.ha.is_paused():
leader_status_code = 200 if response.get('role') in ('master', 'standby_leader') else 503
primary_status_code = 200 if response.get('role') == 'master' else 503
standby_leader_status_code = 200 if response.get('role') == 'standby_leader' else 503
elif patroni.ha.is_leader():
leader_status_code = 200
if patroni.ha.is_standby_cluster():
primary_status_code = replica_status_code = 503
standby_leader_status_code = 200 if response.get('role') in ('replica', 'standby_leader') else 503
else:
primary_status_code = 200
standby_leader_status_code = 503
else:
leader_status_code = primary_status_code = standby_leader_status_code = 503
status_code = 503
ignore_tags = False
if 'standby_leader' in path or 'standby-leader' in path:
status_code = standby_leader_status_code
ignore_tags = True
elif 'leader' in path:
status_code = leader_status_code
ignore_tags = True
elif 'master' in path or 'primary' in path or 'read-write' in path:
status_code = primary_status_code
ignore_tags = True
elif 'replica' in path:
status_code = replica_status_code
elif 'read-only' in path:
status_code = 200 if 200 in (primary_status_code, standby_leader_status_code) else replica_status_code
elif 'health' in path:
status_code = 200 if response.get('state') == 'running' else 503
elif cluster: # dcs is available
is_synchronous = cluster.is_synchronous_mode() and cluster.sync \
and patroni.postgresql.name in cluster.sync.members
if path in ('/sync', '/synchronous') and is_synchronous:
status_code = replica_status_code
elif path in ('/async', '/asynchronous') and not is_synchronous:
status_code = replica_status_code
# check for user defined tags in query params
if not ignore_tags and status_code == 200:
qs_tag_prefix = "tag_"
for qs_key, qs_value in self.path_query.items():
if not qs_key.startswith(qs_tag_prefix):
continue
qs_key = qs_key[len(qs_tag_prefix):]
qs_value = qs_value[0]
instance_tag_value = patroni.tags.get(qs_key)
# tag not registered for instance
if instance_tag_value is None:
status_code = 503
break
if not isinstance(instance_tag_value, six.string_types):
instance_tag_value = str(instance_tag_value).lower()
if instance_tag_value != qs_value:
status_code = 503
break
if write_status_code_only: # when haproxy sends OPTIONS request it reads only status code and nothing more
self._write_status_code_only(status_code)
else:
self._write_status_response(status_code, response)
def do_OPTIONS(self):
self.do_GET(write_status_code_only=True)
def do_GET_liveness(self):
self._write_status_code_only(200)
def do_GET_readiness(self):
patroni = self.server.patroni
if patroni.ha.is_leader():
status_code = 200
elif patroni.postgresql.state == 'running':
status_code = 200 if patroni.dcs.cluster else 503
else:
status_code = 503
self._write_status_code_only(status_code)
def do_GET_patroni(self):
response = self.get_postgresql_status(True)
self._write_status_response(200, response)
def do_GET_cluster(self):
cluster = self.server.patroni.dcs.get_cluster(True)
self._write_json_response(200, cluster_as_json(cluster))
def do_GET_history(self):
cluster = self.server.patroni.dcs.cluster or self.server.patroni.dcs.get_cluster()
self._write_json_response(200, cluster.history and cluster.history.lines or [])
def do_GET_config(self):
cluster = self.server.patroni.dcs.cluster or self.server.patroni.dcs.get_cluster()
if cluster.config:
self._write_json_response(200, cluster.config.data)
else:
self.send_error(502)
def do_GET_metrics(self):
postgres = self.get_postgresql_status(True)
patroni = self.server.patroni
epoch = datetime.datetime(1970, 1, 1, tzinfo=tzutc)
metrics = []
scope_label = '{{scope="{0}"}}'.format(patroni.postgresql.scope)
metrics.append("# HELP patroni_version Patroni semver without periods.")
metrics.append("# TYPE patroni_version gauge")
padded_semver = ''.join([x.zfill(2) for x in patroni.version.split('.')]) # 2.0.2 => 020002
metrics.append("patroni_version{0} {1}".format(scope_label, padded_semver))
metrics.append("# HELP patroni_postgres_running Value is 1 if Postgres is running, 0 otherwise.")
metrics.append("# TYPE patroni_postgres_running gauge")
metrics.append("patroni_postgres_running{0} {1}".format(scope_label, int(postgres['state'] == 'running')))
metrics.append("# HELP patroni_postmaster_start_time Epoch seconds since Postgres started.")
metrics.append("# TYPE patroni_postmaster_start_time gauge")
postmaster_start_time = postgres.get('postmaster_start_time')
postmaster_start_time = (postmaster_start_time - epoch).total_seconds() if postmaster_start_time else 0
metrics.append("patroni_postmaster_start_time{0} {1}".format(scope_label, postmaster_start_time))
metrics.append("# HELP patroni_master Value is 1 if this node is the leader, 0 otherwise.")
metrics.append("# TYPE patroni_master gauge")
metrics.append("patroni_master{0} {1}".format(scope_label, int(postgres['role'] == 'master')))
metrics.append("# HELP patroni_xlog_location Current location of the Postgres"
" transaction log, 0 if this node is not the leader.")
metrics.append("# TYPE patroni_xlog_location counter")
metrics.append("patroni_xlog_location{0} {1}".format(scope_label, postgres.get('xlog', {}).get('location', 0)))
metrics.append("# HELP patroni_standby_leader Value is 1 if this node is the standby_leader, 0 otherwise.")
metrics.append("# TYPE patroni_standby_leader gauge")
metrics.append("patroni_standby_leader{0} {1}".format(scope_label, int(postgres['role'] == 'standby_leader')))
metrics.append("# HELP patroni_replica Value is 1 if this node is a replica, 0 otherwise.")
metrics.append("# TYPE patroni_replica gauge")
metrics.append("patroni_replica{0} {1}".format(scope_label, int(postgres['role'] == 'replica')))
metrics.append("# HELP patroni_xlog_received_location Current location of the received"
" Postgres transaction log, 0 if this node is not a replica.")
metrics.append("# TYPE patroni_xlog_received_location counter")
metrics.append("patroni_xlog_received_location{0} {1}"
.format(scope_label, postgres.get('xlog', {}).get('received_location', 0)))
metrics.append("# HELP patroni_xlog_replayed_location Current location of the replayed"
" Postgres transaction log, 0 if this node is not a replica.")
metrics.append("# TYPE patroni_xlog_replayed_location counter")
metrics.append("patroni_xlog_replayed_location{0} {1}"
.format(scope_label, postgres.get('xlog', {}).get('replayed_location', 0)))
metrics.append("# HELP patroni_xlog_replayed_timestamp Current timestamp of the replayed"
" Postgres transaction log, 0 if null.")
metrics.append("# TYPE patroni_xlog_replayed_timestamp gauge")
replayed_timestamp = postgres.get('xlog', {}).get('replayed_timestamp')
replayed_timestamp = (replayed_timestamp - epoch).total_seconds() if replayed_timestamp else 0
metrics.append("patroni_xlog_replayed_timestamp{0} {1}".format(scope_label, replayed_timestamp))
metrics.append("# HELP patroni_xlog_paused Value is 1 if the Postgres xlog is paused, 0 otherwise.")
metrics.append("# TYPE patroni_xlog_paused gauge")
metrics.append("patroni_xlog_paused{0} {1}"
.format(scope_label, int(postgres.get('xlog', {}).get('paused', False) is True)))
metrics.append("# HELP patroni_postgres_server_version Version of Postgres (if running), 0 otherwise.")
metrics.append("# TYPE patroni_postgres_server_version gauge")
metrics.append("patroni_postgres_server_version {0} {1}".format(scope_label, postgres.get('server_version', 0)))
metrics.append("# HELP patroni_cluster_unlocked Value is 1 if the cluster is unlocked, 0 if locked.")
metrics.append("# TYPE patroni_cluster_unlocked gauge")
metrics.append("patroni_cluster_unlocked{0} {1}".format(scope_label, int(postgres['cluster_unlocked'])))
metrics.append("# HELP patroni_postgres_timeline Postgres timeline of this node (if running), 0 otherwise.")
metrics.append("# TYPE patroni_postgres_timeline counter")
metrics.append("patroni_postgres_timeline{0} {1}".format(scope_label, postgres.get('timeline', 0)))
self._write_response(200, '\n'.join(metrics)+'\n', content_type='text/plain')
def _read_json_content(self, body_is_optional=False):
if 'content-length' not in self.headers:
return self.send_error(411) if not body_is_optional else {}
try:
content_length = int(self.headers.get('content-length'))
if content_length == 0 and body_is_optional:
return {}
request = json.loads(self.rfile.read(content_length).decode('utf-8'))
if isinstance(request, dict) and (request or body_is_optional):
return request
except Exception:
logger.exception('Bad request')
self.send_error(400)
@check_access
def do_PATCH_config(self):
request = self._read_json_content()
if request:
cluster = self.server.patroni.dcs.get_cluster(True)
if not (cluster.config and cluster.config.modify_index):
return self.send_error(503)
data = cluster.config.data.copy()
if patch_config(data, request):
value = json.dumps(data, separators=(',', ':'))
if not self.server.patroni.dcs.set_config_value(value, cluster.config.index):
return self.send_error(409)
self.server.patroni.ha.wakeup()
self._write_json_response(200, data)
@check_access
def do_PUT_config(self):
request = self._read_json_content()
if request:
cluster = self.server.patroni.dcs.get_cluster()
if not deep_compare(request, cluster.config.data):
value = json.dumps(request, separators=(',', ':'))
if not self.server.patroni.dcs.set_config_value(value):
return self.send_error(502)
self._write_json_response(200, request)
@check_access
def do_POST_reload(self):
self.server.patroni.sighup_handler()
self._write_response(202, 'reload scheduled')
@staticmethod
def parse_schedule(schedule, action):
""" parses the given schedule and validates at """
error = None
scheduled_at = None
try:
scheduled_at = dateutil.parser.parse(schedule)
if scheduled_at.tzinfo is None:
error = 'Timezone information is mandatory for the scheduled {0}'.format(action)
status_code = 400
elif scheduled_at < datetime.datetime.now(tzutc):
error = 'Cannot schedule {0} in the past'.format(action)
status_code = 422
else:
status_code = None
except (ValueError, TypeError):
logger.exception('Invalid scheduled %s time: %s', action, schedule)
error = 'Unable to parse scheduled timestamp. It should be in an unambiguous format, e.g. ISO 8601'
status_code = 422
return (status_code, error, scheduled_at)
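# Example outcomes (derived from the checks above; timestamps are illustrative):
# an aware ISO 8601 timestamp in the future parses cleanly and status_code is None;
# a naive timestamp yields status 400 with a "Timezone information is mandatory"
# error; a timestamp in the past yields status 422 with "Cannot schedule ... in
# the past".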
@check_access
def do_POST_restart(self):
status_code = 500
data = 'restart failed'
request = self._read_json_content(body_is_optional=True)
cluster = self.server.patroni.dcs.get_cluster()
if request is None:
# failed to parse the json
return
if request:
logger.debug("received restart request: {0}".format(request))
if cluster.is_paused() and 'schedule' in request:
self._write_response(status_code, "Can't schedule restart in the paused state")
return
for k in request:
if k == 'schedule':
(_, data, request[k]) = self.parse_schedule(request[k], "restart")
if _:
status_code = _
break
elif k == 'role':
if request[k] not in ('master', 'replica'):
status_code = 400
data = "PostgreSQL role should be either master or replica"
break
elif k == 'postgres_version':
try:
postgres_version_to_int(request[k])
except PostgresException as e:
status_code = 400
data = e.value
break
elif k == 'timeout':
request[k] = parse_int(request[k], 's')
if request[k] is None or request[k] <= 0:
status_code = 400
data = "Timeout should be a positive number of seconds"
break
elif k != 'restart_pending':
status_code = 400
data = "Unknown filter for the scheduled restart: {0}".format(k)
break
else:
if 'schedule' not in request:
try:
status, data = self.server.patroni.ha.restart(request)
status_code = 200 if status else 503
except Exception:
logger.exception('Exception during restart')
status_code = 400
else:
if self.server.patroni.ha.schedule_future_restart(request):
data = "Restart scheduled"
status_code = 202
else:
data = "Another restart is already scheduled"
status_code = 409
self._write_response(status_code, data)
@check_access
def do_DELETE_restart(self):
if self.server.patroni.ha.delete_future_restart():
data = "scheduled restart deleted"
code = 200
else:
data = "no restarts are scheduled"
code = 404
self._write_response(code, data)
@check_access
def do_DELETE_switchover(self):
failover = self.server.patroni.dcs.get_cluster().failover
if failover and failover.scheduled_at:
if not self.server.patroni.dcs.manual_failover('', '', index=failover.index):
return self.send_error(409)
else:
data = "scheduled switchover deleted"
code = 200
else:
data = "no switchover is scheduled"
code = 404
self._write_response(code, data)
@check_access
def do_POST_reinitialize(self):
request = self._read_json_content(body_is_optional=True)
if request:
logger.debug('received reinitialize request: %s', request)
force = isinstance(request, dict) and parse_bool(request.get('force')) or False
data = self.server.patroni.ha.reinitialize(force)
if data is None:
status_code = 200
data = 'reinitialize started'
else:
status_code = 503
self._write_response(status_code, data)
def poll_failover_result(self, leader, candidate, action):
timeout = max(10, self.server.patroni.dcs.loop_wait)
for _ in range(0, timeout*2):
time.sleep(1)
try:
cluster = self.server.patroni.dcs.get_cluster()
if not cluster.is_unlocked() and cluster.leader.name != leader:
if not candidate or candidate == cluster.leader.name:
return 200, 'Successfully {0}ed over to "{1}"'.format(action[:-4], cluster.leader.name)
else:
return 200, '{0}ed over to "{1}" instead of "{2}"'.format(action[:-4].title(),
cluster.leader.name, candidate)
if not cluster.failover:
return 503, action.title() + ' failed'
except Exception as e:
logger.debug('Exception occurred during polling %s result: %s', action, e)
return 503, action.title() + ' status unknown'
def is_failover_possible(self, cluster, leader, candidate, action):
if leader and (not cluster.leader or cluster.leader.name != leader):
return 'leader name does not match'
if candidate:
if action == 'switchover' and cluster.is_synchronous_mode() and candidate not in cluster.sync.members:
return 'candidate name does not match with sync_standby'
members = [m for m in cluster.members if m.name == candidate]
if not members:
return 'candidate does not exist'
elif cluster.is_synchronous_mode():
members = [m for m in cluster.members if m.name in cluster.sync.members]
if not members:
return action + ' is not possible: can not find sync_standby'
else:
members = [m for m in cluster.members if m.name != cluster.leader.name and m.api_url]
if not members:
return action + ' is not possible: cluster does not have members except leader'
for st in self.server.patroni.ha.fetch_nodes_statuses(members):
if st.failover_limitation() is None:
return None
return action + ' is not possible: no good candidates have been found'
@check_access
def do_POST_failover(self, action='failover'):
request = self._read_json_content()
(status_code, data) = (400, '')
if not request:
return
leader = request.get('leader')
candidate = request.get('candidate') or request.get('member')
scheduled_at = request.get('scheduled_at')
cluster = self.server.patroni.dcs.get_cluster()
logger.info("received %s request with leader=%s candidate=%s scheduled_at=%s",
action, leader, candidate, scheduled_at)
if action == 'failover' and not candidate:
data = 'Failover could be performed only to a specific candidate'
elif action == 'switchover' and not leader:
data = 'Switchover could be performed only from a specific leader'
if not data and scheduled_at:
if not leader:
data = 'Scheduled {0} is possible only from a specific leader'.format(action)
if not data and cluster.is_paused():
data = "Can't schedule {0} in the paused state".format(action)
if not data:
(status_code, data, scheduled_at) = self.parse_schedule(scheduled_at, action)
if not data and cluster.is_paused() and not candidate:
data = action.title() + ' is possible only to a specific candidate in a paused state'
if not data and not scheduled_at:
data = self.is_failover_possible(cluster, leader, candidate, action)
if data:
status_code = 412
if not data:
if self.server.patroni.dcs.manual_failover(leader, candidate, scheduled_at=scheduled_at):
self.server.patroni.ha.wakeup()
if scheduled_at:
data = action.title() + ' scheduled'
status_code = 202
else:
status_code, data = self.poll_failover_result(cluster.leader and cluster.leader.name,
candidate, action)
else:
data = 'failed to write {0} key into DCS'.format(action)
status_code = 503
self._write_response(status_code, data)
def do_POST_switchover(self):
self.do_POST_failover(action='switchover')
def parse_request(self):
"""Override parse_request method to enrich basic functionality of `BaseHTTPRequestHandler` class
Original class can only invoke do_GET, do_POST, do_PUT, etc method implementations if they are defined.
But we would like to have at least some simple routing mechanism, i.e.:
GET /uri1/part2 request should invoke `do_GET_uri1()`
POST /other should invoke `do_POST_other()`
If the `do_<REQUEST_METHOD>_<first_part_url>` method does not exist we'll fall back to the original behavior."""
ret = BaseHTTPRequestHandler.parse_request(self)
if ret:
urlpath = urlparse(self.path)
self.path = urlpath.path
self.path_query = parse_qs(urlpath.query) or {}
mname = self.path.lstrip('/').split('/')[0]
mname = self.command + ('_' + mname if mname else '')
if hasattr(self, 'do_' + mname):
self.command = mname
return ret
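# Routing example (following the scheme described in the docstring above):
# "GET /cluster" rewrites self.command to "GET_cluster" so do_GET_cluster()
# handles the request, while a path with no matching do_GET_<name>() method
# falls back to the plain do_GET() handler.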
def query(self, sql, *params, **kwargs):
if not kwargs.get('retry', False):
return self.server.query(sql, *params)
retry = Retry(delay=1, retry_exceptions=PostgresConnectionException)
return retry(self.server.query, sql, *params)
def get_postgresql_status(self, retry=False):
postgresql = self.server.patroni.postgresql
try:
cluster = self.server.patroni.dcs.cluster
if postgresql.state not in ('running', 'restarting', 'starting'):
raise RetryFailedError('')
stmt = ("SELECT " + postgresql.POSTMASTER_START_TIME + ", " + postgresql.TL_LSN + ","
" pg_catalog.pg_last_xact_replay_timestamp(),"
" pg_catalog.array_to_json(pg_catalog.array_agg(pg_catalog.row_to_json(ri))) "
"FROM (SELECT (SELECT rolname FROM pg_authid WHERE oid = usesysid) AS usename,"
" application_name, client_addr, w.state, sync_state, sync_priority"
" FROM pg_catalog.pg_stat_get_wal_senders() w, pg_catalog.pg_stat_get_activity(pid)) AS ri")
row = self.query(stmt.format(postgresql.wal_name, postgresql.lsn_name), retry=retry)[0]
result = {
'state': postgresql.state,
'postmaster_start_time': row[0],
'role': 'replica' if row[1] == 0 else 'master',
'server_version': postgresql.server_version,
'cluster_unlocked': bool(not cluster or cluster.is_unlocked()),
'xlog': ({
'received_location': row[4] or row[3],
'replayed_location': row[3],
'replayed_timestamp': row[6],
'paused': row[5]} if row[1] == 0 else {
'location': row[2]
})
}
if result['role'] == 'replica' and self.server.patroni.ha.is_standby_cluster():
result['role'] = postgresql.role
if row[1] > 0:
result['timeline'] = row[1]
else:
leader_timeline = None if not cluster or cluster.is_unlocked() else cluster.leader.timeline
result['timeline'] = postgresql.replica_cached_timeline(leader_timeline)
if row[7]:
result['replication'] = row[7]
return result
except (psycopg2.Error, RetryFailedError, PostgresConnectionException):
state = postgresql.state
if state == 'running':
logger.exception('get_postgresql_status')
state = 'unknown'
return {'state': state, 'role': postgresql.role}
def handle_one_request(self):
self.__start_time = time.time()
BaseHTTPRequestHandler.handle_one_request(self)
def log_message(self, fmt, *args):
latency = 1000.0 * (time.time() - self.__start_time)
logger.debug("API thread: %s - - %s latency: %0.3f ms", self.client_address[0], fmt % args, latency)
class RestApiServer(ThreadingMixIn, HTTPServer, Thread):
# On 3.7+ the `ThreadingMixIn` gathers all non-daemon worker threads in order to join on them at server close.
daemon_threads = True # Make worker threads "fire and forget" to prevent a memory leak.
def __init__(self, patroni, config):
self.patroni = patroni
self.__listen = None
self.__ssl_options = None
self.reload_config(config)
self.daemon = True
self.__ssl_serial_number = None
self._received_new_cert = False
def query(self, sql, *params):
cursor = None
try:
with self.patroni.postgresql.connection().cursor() as cursor:
cursor.execute(sql, params)
return [r for r in cursor]
except psycopg2.Error as e:
if cursor and cursor.connection.closed == 0:
raise e
raise PostgresConnectionException('connection problems')
@staticmethod
def _set_fd_cloexec(fd):
if os.name != 'nt':
import fcntl
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def check_basic_auth_key(self, key):
return hmac.compare_digest(self.__auth_key, key.encode('utf-8'))
def check_auth_header(self, auth_header):
if self.__auth_key:
if auth_header is None:
return 'no auth header received'
if not auth_header.startswith('Basic ') or not self.check_basic_auth_key(auth_header[6:]):
return 'not authenticated'
@staticmethod
def __resolve_ips(host, port):
try:
for _, _, _, _, sa in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP):
yield ip_network(sa[0])
except Exception as e:
logger.error('Failed to resolve %s: %r', host, e)
def __members_ips(self):
cluster = self.patroni.dcs.cluster
if self.__allowlist_include_members and cluster:
for member in cluster.members:
if member.api_url:
try:
r = urlparse(member.api_url)
host = r.hostname
port = r.port or (443 if r.scheme == 'https' else 80)
for ip in self.__resolve_ips(host, port):
yield ip
except Exception as e:
logger.debug('Failed to parse url %s: %r', member.api_url, e)
def check_access(self, rh):
if self.__allowlist or self.__allowlist_include_members:
incoming_ip = rh.client_address[0]
incoming_ip = ip_address(incoming_ip.decode('utf-8') if six.PY2 else incoming_ip)
if not any(incoming_ip in net for net in self.__allowlist + tuple(self.__members_ips())):
return rh._write_response(403, 'Access is denied')
if not hasattr(rh.request, 'getpeercert') or not rh.request.getpeercert(): # valid client cert isn't present
if self.__protocol == 'https' and self.__ssl_options.get('verify_client') in ('required', 'optional'):
return rh._write_response(403, 'client certificate required')
reason = self.check_auth_header(rh.headers.get('Authorization'))
if reason:
headers = {'WWW-Authenticate': 'Basic realm="' + self.patroni.__class__.__name__ + '"'}
return rh._write_response(401, reason, headers=headers)
return True
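# Order of the checks above: the allowlist / member-IP test runs first, then an
# optional client-certificate requirement when serving https with verify_client
# set, and finally HTTP Basic auth when an auth key is configured; the first
# failing check writes a 403 or 401 response and the caller never receives True.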
@staticmethod
def __has_dual_stack():
if hasattr(socket, 'AF_INET6') and hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY'):
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, False)
return True
except socket.error as e:
logger.debug('Error when working with ipv6 socket: %s', e)
finally:
if sock:
sock.close()
return False
def __httpserver_init(self, host, port):
dual_stack = self.__has_dual_stack()
if host in ('', '*'):
host = None
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
# in case dual stack is not supported we want IPv4 to be preferred over IPv6
info.sort(key=lambda x: x[0] == socket.AF_INET, reverse=not dual_stack)
self.address_family = info[0][0]
try:
HTTPServer.__init__(self, info[0][-1][:2], RestApiHandler)
except socket.error:
logger.error(
"Couldn't start a service on '%s:%s', please check your `restapi.listen` configuration", host, port)
raise
def __initialize(self, listen, ssl_options):
try:
host, port = split_host_port(listen, None)
except Exception:
raise ValueError('Invalid "restapi" config: expected <HOST>:<PORT> for "listen", but got "{0}"'
.format(listen))
reloading_config = self.__listen is not None # changing config in runtime
if reloading_config:
self.shutdown()
self.__listen = listen
self.__ssl_options = ssl_options
self._received_new_cert = False # reset to False after reload_config()
self.__httpserver_init(host, port)
Thread.__init__(self, target=self.serve_forever)
self._set_fd_cloexec(self.socket)
# wrap socket with ssl if 'certfile' is defined in a config.yaml
# Sometimes it's also necessary to pass a reference to a 'keyfile'.
self.__protocol = 'https' if ssl_options.get('certfile') else 'http'
if self.__protocol == 'https':
import ssl
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=ssl_options.get('cafile'))
if ssl_options.get('ciphers'):
ctx.set_ciphers(ssl_options['ciphers'])
ctx.load_cert_chain(certfile=ssl_options['certfile'], keyfile=ssl_options.get('keyfile'),
password=ssl_options.get('keyfile_password'))
verify_client = ssl_options.get('verify_client')
if verify_client:
modes = {'none': ssl.CERT_NONE, 'optional': ssl.CERT_OPTIONAL, 'required': ssl.CERT_REQUIRED}
if verify_client in modes:
ctx.verify_mode = modes[verify_client]
else:
logger.error('Bad value in the "restapi.verify_client": %s', verify_client)
self.__ssl_serial_number = self.get_certificate_serial_number()
self.socket = ctx.wrap_socket(self.socket, server_side=True)
if reloading_config:
self.start()
def process_request_thread(self, request, client_address):
if isinstance(request, tuple):
sock, newsock = request
try:
request = sock.context.wrap_socket(newsock, do_handshake_on_connect=sock.do_handshake_on_connect,
suppress_ragged_eofs=sock.suppress_ragged_eofs, server_side=True)
except socket.error:
return
super(RestApiServer, self).process_request_thread(request, client_address)
def get_request(self):
sock = self.socket
newsock, addr = socket.socket.accept(sock)
enable_keepalive(newsock, 10, 3)
if hasattr(sock, 'context'): # SSLSocket, we want to do the deferred handshake from a thread
newsock = (sock, newsock)
return newsock, addr
def shutdown_request(self, request):
if isinstance(request, tuple):
_, request = request # SSLSocket
return super(RestApiServer, self).shutdown_request(request)
def get_certificate_serial_number(self):
if self.__ssl_options.get('certfile'):
import ssl
try:
crt = ssl._ssl._test_decode_cert(self.__ssl_options['certfile'])
return crt.get('serialNumber')
except ssl.SSLError as e:
logger.error('Failed to get serial number from certificate %s: %r', self.__ssl_options['certfile'], e)
def reload_local_certificate(self):
if self.__protocol == 'https':
on_disk_cert_serial_number = self.get_certificate_serial_number()
if on_disk_cert_serial_number != self.__ssl_serial_number:
self._received_new_cert = True
self.__ssl_serial_number = on_disk_cert_serial_number
return True
def _build_allowlist(self, value):
if isinstance(value, list):
for v in value:
if '/' in v: # netmask
try:
yield ip_network(v)
except Exception as e:
logger.error('Invalid value "%s" in the allowlist: %r', v, e)
else: # ip or hostname, try to resolve it
for ip in self.__resolve_ips(v, 8080):
yield ip
def reload_config(self, config):
if 'listen' not in config: # changing config in runtime
raise ValueError('Cannot find "restapi.listen" config')
self.__allowlist = tuple(self._build_allowlist(config.get('allowlist')))
self.__allowlist_include_members = config.get('allowlist_include_members')
ssl_options = {n: config[n] for n in ('certfile', 'keyfile', 'keyfile_password',
'cafile', 'ciphers') if n in config}
self.http_extra_headers = config.get('http_extra_headers') or {}
self.http_extra_headers.update((config.get('https_extra_headers') or {}) if ssl_options.get('certfile') else {})
if isinstance(config.get('verify_client'), six.string_types):
ssl_options['verify_client'] = config['verify_client'].lower()
if self.__listen != config['listen'] or self.__ssl_options != ssl_options or self._received_new_cert:
self.__initialize(config['listen'], ssl_options)
self.__auth_key = base64.b64encode(config['auth'].encode('utf-8')) if 'auth' in config else None
self.connection_string = uri(self.__protocol, config.get('connect_address') or self.__listen, 'patroni')
@staticmethod
def handle_error(request, client_address):
address, port = client_address
logger.warning('Exception happened during processing of request from {}:{}'.format(address, port))
logger.warning(traceback.format_exc())
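# Illustrative sketch (not part of Patroni): check_access() above accepts a
# request when the client address falls inside any ip_network from the
# configured allowlist or the resolved member IPs. A minimal, self-contained
# demonstration of that membership test, using only the stdlib ipaddress
# module and made-up addresses:
if __name__ == '__main__':
    from ipaddress import ip_address, ip_network

    allowlist = (ip_network(u'10.0.0.0/8'), ip_network(u'192.168.1.0/24'))
    incoming_ip = ip_address(u'10.42.0.7')
    # Same expression shape as in check_access(): any() over the networks.
    print(any(incoming_ip in net for net in allowlist))  # -> True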
|
mit
|
mmazanec22/too-windy
|
env/lib/python3.5/site-packages/pip/_vendor/lockfile/linklockfile.py
|
536
|
2652
|
from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class LinkLockFile(LockBase):
"""Lock access to a file using atomic property of link(2).
>>> lock = LinkLockFile('somefile')
>>> lock = LinkLockFile('somefile', threaded=False)
"""
def acquire(self, timeout=None):
try:
open(self.unique_name, "wb").close()
except IOError:
raise LockFailed("failed to create %s" % self.unique_name)
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try to create a hard link to it.
try:
os.link(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
nlinks = os.stat(self.unique_name).st_nlink
if nlinks == 2:
# The original link plus the one I created == 2. We're
# good to go.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
os.unlink(self.unique_name)
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout is not None and timeout / 10 or 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not os.path.exists(self.unique_name):
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.unique_name)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name) and
os.stat(self.unique_name).st_nlink == 2)
def break_lock(self):
if os.path.exists(self.lock_file):
os.unlink(self.lock_file)
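# Illustrative usage sketch (not part of the lockfile package; assumes a
# writable working directory and a filesystem with link(2) support):
# acquire() creates `unique_name` and hard-links it to `lock_file`; a link
# count of 2 on `unique_name` is what marks the lock as held.
if __name__ == '__main__':
    lock = LinkLockFile('somefile', threaded=False)
    lock.acquire(timeout=5)
    try:
        print(lock.is_locked(), lock.i_am_locking())  # True True
    finally:
        lock.release()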
|
gpl-3.0
|
heke123/chromium-crosswalk
|
third_party/pylint/pylint/checkers/design_analysis.py
|
59
|
14962
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""check for signs of poor design"""
import re
from collections import defaultdict
from astroid import Function, If, InferenceError
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile('_.*')
def class_is_abstract(klass):
"""return true if the given class node should be considered as an abstract
class
"""
for attr in klass.values():
if isinstance(attr, Function):
if attr.is_abstract(pass_is_abstract=False):
return True
return False
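# Illustrative note (comment only, not executed): class_is_abstract() defers
# to astroid's Function.is_abstract(pass_is_abstract=False), so a class such
# as
#
#     class Base(object):
#         def run(self):
#             raise NotImplementedError
#
# is typically reported as abstract, while a class whose methods all have
# concrete bodies is not.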
MSGS = {
'R0901': ('Too many ancestors (%s/%s)',
'too-many-ancestors',
'Used when class has too many parent classes, try to reduce \
this to get a simpler (and so easier to use) class.'),
'R0902': ('Too many instance attributes (%s/%s)',
'too-many-instance-attributes',
'Used when class has too many instance attributes, try to reduce \
this to get a simpler (and so easier to use) class.'),
'R0903': ('Too few public methods (%s/%s)',
'too-few-public-methods',
'Used when class has too few public methods, so be sure it\'s \
really worth it.'),
'R0904': ('Too many public methods (%s/%s)',
'too-many-public-methods',
'Used when class has too many public methods, try to reduce \
this to get a simpler (and so easier to use) class.'),
'R0911': ('Too many return statements (%s/%s)',
'too-many-return-statements',
'Used when a function or method has too many return statements, \
making it hard to follow.'),
'R0912': ('Too many branches (%s/%s)',
'too-many-branches',
'Used when a function or method has too many branches, \
making it hard to follow.'),
'R0913': ('Too many arguments (%s/%s)',
'too-many-arguments',
'Used when a function or method takes too many arguments.'),
'R0914': ('Too many local variables (%s/%s)',
'too-many-locals',
'Used when a function or method has too many local variables.'),
'R0915': ('Too many statements (%s/%s)',
'too-many-statements',
'Used when a function or method has too many statements. You \
should then split it in smaller functions / methods.'),
'R0921': ('Abstract class not referenced',
'abstract-class-not-used',
'Used when an abstract class is not used as ancestor anywhere.'),
'R0922': ('Abstract class is only referenced %s times',
'abstract-class-little-used',
'Used when an abstract class is used less than X times as \
ancestor.'),
'R0923': ('Interface not implemented',
'interface-not-implemented',
'Used when an interface class is not implemented anywhere.'),
}
class MisdesignChecker(BaseChecker):
"""checks for sign of poor/misdesign:
* number of methods, attributes, local variables...
* size, complexity of functions, methods
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = 'design'
# messages
msgs = MSGS
priority = -2
# configuration options
options = (('max-args',
{'default' : 5, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of arguments for function / method'}
),
('ignored-argument-names',
{'default' : IGNORED_ARGUMENT_NAMES,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Argument names that match this expression will be '
'ignored. Defaults to names with a leading underscore'}
),
('max-locals',
{'default' : 15, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of locals for function / method body'}
),
('max-returns',
{'default' : 6, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of return / yield for function / '
'method body'}
),
('max-branches',
{'default' : 12, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of branches for function / method body'}
),
('max-statements',
{'default' : 50, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of statements in function / method '
'body'}
),
('max-parents',
{'default' : 7,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Maximum number of parents for a class (see R0901).'}
),
('max-attributes',
{'default' : 7,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Maximum number of attributes for a class \
(see R0902).'}
),
('min-public-methods',
{'default' : 2,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Minimum number of public methods for a class \
(see R0903).'}
),
('max-public-methods',
{'default' : 20,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Maximum number of public methods for a class \
(see R0904).'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self.stats = None
self._returns = None
self._branches = None
self._used_abstracts = None
self._used_ifaces = None
self._abstracts = None
self._ifaces = None
self._stmts = 0
def open(self):
"""initialize visit variables"""
self.stats = self.linter.add_stats()
self._returns = []
self._branches = defaultdict(int)
self._used_abstracts = {}
self._used_ifaces = {}
self._abstracts = []
self._ifaces = []
# Check 'R0921', 'R0922', 'R0923'
def close(self):
"""check that abstract/interface classes are used"""
for abstract in self._abstracts:
if abstract not in self._used_abstracts:
self.add_message('abstract-class-not-used', node=abstract)
elif self._used_abstracts[abstract] < 2:
self.add_message('abstract-class-little-used', node=abstract,
args=self._used_abstracts[abstract])
for iface in self._ifaces:
if iface not in self._used_ifaces:
self.add_message('interface-not-implemented', node=iface)
@check_messages('too-many-ancestors', 'too-many-instance-attributes',
'too-few-public-methods', 'too-many-public-methods',
'abstract-class-not-used', 'abstract-class-little-used',
'interface-not-implemented')
def visit_class(self, node):
"""check size of inheritance hierarchy and number of instance attributes
"""
# Is the total inheritance hierarchy 7 or less?
nb_parents = len(list(node.ancestors()))
if nb_parents > self.config.max_parents:
self.add_message('too-many-ancestors', node=node,
args=(nb_parents, self.config.max_parents))
# Does the class contain less than 20 attributes for
# non-GUI classes (40 for GUI)?
# FIXME detect gui classes
if len(node.instance_attrs) > self.config.max_attributes:
self.add_message('too-many-instance-attributes', node=node,
args=(len(node.instance_attrs),
self.config.max_attributes))
# update abstract / interface classes structures
if class_is_abstract(node):
self._abstracts.append(node)
elif node.type == 'interface' and node.name != 'Interface':
self._ifaces.append(node)
for parent in node.ancestors(False):
if parent.name == 'Interface':
continue
self._used_ifaces[parent] = 1
try:
for iface in node.interfaces():
self._used_ifaces[iface] = 1
except InferenceError:
# XXX log ?
pass
for parent in node.ancestors():
try:
self._used_abstracts[parent] += 1
except KeyError:
self._used_abstracts[parent] = 1
@check_messages('too-many-ancestors', 'too-many-instance-attributes',
'too-few-public-methods', 'too-many-public-methods',
'abstract-class-not-used', 'abstract-class-little-used',
'interface-not-implemented')
def leave_class(self, node):
"""check number of public methods"""
nb_public_methods = 0
for method in node.mymethods():
if not method.name.startswith('_'):
nb_public_methods += 1
# Does the class contain more public methods than allowed (max-public-methods)?
if nb_public_methods > self.config.max_public_methods:
self.add_message('too-many-public-methods', node=node,
args=(nb_public_methods,
self.config.max_public_methods))
# stop here for exception, metaclass and interface classes
if node.type != 'class':
return
# Does the class contain fewer public methods than required (min-public-methods)?
if nb_public_methods < self.config.min_public_methods:
self.add_message('too-few-public-methods', node=node,
args=(nb_public_methods,
self.config.min_public_methods))
@check_messages('too-many-return-statements', 'too-many-branches',
'too-many-arguments', 'too-many-locals',
'too-many-statements')
def visit_function(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
# init branch and returns counters
self._returns.append(0)
# check number of arguments
args = node.args.args
if args is not None:
ignored_args_num = len(
[arg for arg in args
if self.config.ignored_argument_names.match(arg.name)])
argnum = len(args) - ignored_args_num
if argnum > self.config.max_args:
self.add_message('too-many-arguments', node=node,
args=(len(args), self.config.max_args))
else:
ignored_args_num = 0
# check number of local variables
locnum = len(node.locals) - ignored_args_num
if locnum > self.config.max_locals:
self.add_message('too-many-locals', node=node,
args=(locnum, self.config.max_locals))
# init statements counter
self._stmts = 1
@check_messages('too-many-return-statements', 'too-many-branches',
'too-many-arguments', 'too-many-locals',
'too-many-statements')
def leave_function(self, node):
"""most of the work is done here on close:
checks for max returns, branch, return in __init__
"""
returns = self._returns.pop()
if returns > self.config.max_returns:
self.add_message('too-many-return-statements', node=node,
args=(returns, self.config.max_returns))
branches = self._branches[node]
if branches > self.config.max_branches:
self.add_message('too-many-branches', node=node,
args=(branches, self.config.max_branches))
# check number of statements
if self._stmts > self.config.max_statements:
self.add_message('too-many-statements', node=node,
args=(self._stmts, self.config.max_statements))
def visit_return(self, _):
"""count number of returns"""
if not self._returns:
return # return outside function, reported by the base checker
self._returns[-1] += 1
def visit_default(self, node):
"""default visit method -> increments the statements counter if
necessary
"""
if node.is_statement:
self._stmts += 1
def visit_tryexcept(self, node):
"""increments the branches counter"""
branches = len(node.handlers)
if node.orelse:
branches += 1
self._inc_branch(node, branches)
self._stmts += branches
def visit_tryfinally(self, node):
"""increments the branches counter"""
self._inc_branch(node, 2)
self._stmts += 2
def visit_if(self, node):
"""increments the branches counter"""
branches = 1
# don't double count If nodes coming from some 'elif'
if node.orelse and (len(node.orelse) > 1 or
not isinstance(node.orelse[0], If)):
branches += 1
self._inc_branch(node, branches)
self._stmts += branches
def visit_while(self, node):
"""increments the branches counter"""
branches = 1
if node.orelse:
branches += 1
self._inc_branch(node, branches)
visit_for = visit_while
def _inc_branch(self, node, branchesnum=1):
"""increments the branches counter"""
self._branches[node.scope()] += branchesnum
# FIXME: make a nice report...
def register(linter):
"""required method to auto register this checker """
linter.register_checker(MisdesignChecker(linter))
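# Illustrative usage sketch (not part of pylint itself): the thresholds above
# are normally tuned from the command line or a pylintrc, with flag names
# matching the `options` tuple on MisdesignChecker, e.g.
#
#     pylint --max-args=6 --max-branches=15 --min-public-methods=1 mypackage
#
# The same run can be started programmatically; note that Run() parses the
# argument list like the command line and exits with pylint's status code.
# `mypackage` is a placeholder target, not a real module.
if __name__ == '__main__':
    from pylint import lint
    lint.Run(['--max-args=6', '--max-branches=15', 'mypackage'])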
|
bsd-3-clause
|
sve-odoo/odoo
|
addons/mrp/mrp.py
|
9
|
64294
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import float_compare
from openerp.tools.translate import _
from openerp import tools, SUPERUSER_ID
from openerp.addons.product import _common
class mrp_property_group(osv.osv):
"""
Group of mrp properties.
"""
_name = 'mrp.property.group'
_description = 'Property Group'
_columns = {
'name': fields.char('Property Group', required=True),
'description': fields.text('Description'),
}
class mrp_property(osv.osv):
"""
Properties of mrp.
"""
_name = 'mrp.property'
_description = 'Property'
_columns = {
'name': fields.char('Name', required=True),
'composition': fields.selection([('min','min'),('max','max'),('plus','plus')], 'Properties composition', required=True, help="Not used in computations, for information purposes only."),
'group_id': fields.many2one('mrp.property.group', 'Property Group', required=True),
'description': fields.text('Description'),
}
_defaults = {
'composition': lambda *a: 'min',
}
#----------------------------------------------------------
# Work Centers
#----------------------------------------------------------
# capacity_hour : capacity per hour. default: 1.0.
# Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees)
# unit_per_cycle : how many units are produced for one cycle
class mrp_workcenter(osv.osv):
_name = 'mrp.workcenter'
_description = 'Work Center'
_inherits = {'resource.resource':"resource_id"}
_columns = {
'note': fields.text('Description', help="Description of the Work Center. Explain here what a cycle is according to this Work Center."),
'capacity_per_cycle': fields.float('Capacity per Cycle', help="Number of operations this Work Center can do in parallel. If this Work Center represents a team of 5 workers, the capacity per cycle is 5."),
'time_cycle': fields.float('Time for 1 cycle (hour)', help="Time in hours for doing one cycle."),
'time_start': fields.float('Time before prod.', help="Time in hours for the setup."),
'time_stop': fields.float('Time after prod.', help="Time in hours for the cleaning."),
'costs_hour': fields.float('Cost per hour', help="Specify Cost of Work Center per hour."),
'costs_hour_account_id': fields.many2one('account.analytic.account', 'Hour Account', domain=[('type','!=','view')],
help="Fill this only if you want automatic analytic accounting entries on production orders."),
'costs_cycle': fields.float('Cost per cycle', help="Specify Cost of Work Center per cycle."),
'costs_cycle_account_id': fields.many2one('account.analytic.account', 'Cycle Account', domain=[('type','!=','view')],
help="Fill this only if you want automatic analytic accounting entries on production orders."),
'costs_journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal'),
'costs_general_account_id': fields.many2one('account.account', 'General Account', domain=[('type','!=','view')]),
'resource_id': fields.many2one('resource.resource','Resource', ondelete='cascade', required=True),
'product_id': fields.many2one('product.product','Work Center Product', help="Fill this product to easily track your production costs in the analytic accounting."),
}
_defaults = {
'capacity_per_cycle': 1.0,
'resource_type': 'material',
}
def on_change_product_cost(self, cr, uid, ids, product_id, context=None):
value = {}
if product_id:
cost = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
value = {'costs_hour': cost.standard_price}
return {'value': value}
class mrp_routing(osv.osv):
"""
For specifying the routings of Work Centers.
"""
_name = 'mrp.routing'
_description = 'Routing'
_columns = {
'name': fields.char('Name', required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the routing without removing it."),
'code': fields.char('Code', size=8),
'note': fields.text('Description'),
'workcenter_lines': fields.one2many('mrp.routing.workcenter', 'routing_id', 'Work Centers', copy=True),
'location_id': fields.many2one('stock.location', 'Production Location',
help="Keep empty if you produce at the location where the finished products are needed." \
"Set a location if you produce at a fixed location. This can be a partner location " \
"if you subcontract the manufacturing operations."
),
'company_id': fields.many2one('res.company', 'Company'),
}
_defaults = {
'active': lambda *a: 1,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.routing', context=context)
}
class mrp_routing_workcenter(osv.osv):
"""
Defines working cycles and hours of a Work Center using routings.
"""
_name = 'mrp.routing.workcenter'
_description = 'Work Center Usage'
_order = 'sequence'
_columns = {
'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of routing Work Centers."),
'cycle_nbr': fields.float('Number of Cycles', required=True,
help="Number of iterations this work center has to do in the specified operation of the routing."),
'hour_nbr': fields.float('Number of Hours', required=True, help="Time in hours for this Work Center to achieve the operation of the specified routing."),
'routing_id': fields.many2one('mrp.routing', 'Parent Routing', select=True, ondelete='cascade',
help="Routing indicates all the Work Centers used, for how long and/or cycles." \
"If Routing is indicated then,the third tab of a production order (Work Centers) will be automatically pre-completed."),
'note': fields.text('Description'),
'company_id': fields.related('routing_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
_defaults = {
'cycle_nbr': lambda *a: 1.0,
'hour_nbr': lambda *a: 0.0,
}
class mrp_bom(osv.osv):
"""
Defines bills of material for a product.
"""
_name = 'mrp.bom'
_description = 'Bill of Material'
_inherit = ['mail.thread']
def _child_compute(self, cr, uid, ids, name, arg, context=None):
""" Gets child bom.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param name: Name of the field
@param arg: User defined argument
@param context: A standard dictionary for contextual values
@return: Dictionary of values
"""
result = {}
if context is None:
context = {}
bom_obj = self.pool.get('mrp.bom')
bom_id = context and context.get('active_id', False) or False
cr.execute('select id from mrp_bom')
if all(bom_id != r[0] for r in cr.fetchall()):
ids.sort()
bom_id = ids[0]
bom_parent = bom_obj.browse(cr, uid, bom_id, context=context)
for bom in self.browse(cr, uid, ids, context=context):
if (bom_parent) or (bom.id == bom_id):
result[bom.id] = map(lambda x: x.id, bom.bom_line_ids)
else:
result[bom.id] = []
if bom.bom_line_ids:
continue
ok = (name == 'child_complete_ids')
if (bom.type=='phantom' or ok):
sids = bom_obj.search(cr, uid, [('product_tmpl_id','=',bom.product_tmpl_id.id)])
if sids:
bom2 = bom_obj.browse(cr, uid, sids[0], context=context)
result[bom.id] += map(lambda x: x.id, bom2.bom_line_ids)
return result
_columns = {
'name': fields.char('Name'),
'code': fields.char('Reference', size=16),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the bills of material without removing it."),
'type': fields.selection([('normal', 'Normal'), ('phantom', 'Set')], 'BoM Type', required=True,
help= "Set: When processing a sales order for this product, the delivery order will contain the raw materials, instead of the finished product."),
'position': fields.char('Internal Reference', help="Reference to a position in an external plan."),
'product_tmpl_id': fields.many2one('product.template', 'Product', required=True),
'product_id': fields.many2one('product.product', 'Product Variant',
domain="[('product_tmpl_id','=',product_tmpl_id)]",
help="If a product variant is defined the BOM is available only for this product."),
'bom_line_ids': fields.one2many('mrp.bom.line', 'bom_id', 'BoM Lines', copy=True),
'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, help="Unit of Measure (Unit of Measure) is the unit of measurement for the inventory control"),
'date_start': fields.date('Valid From', help="Validity of this BoM. Keep empty if it's always valid."),
'date_stop': fields.date('Valid Until', help="Validity of this BoM. Keep empty if it's always valid."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of bills of material."),
'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. "\
"The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."),
'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% during the production process."),
'property_ids': fields.many2many('mrp.property', string='Properties'),
'child_complete_ids': fields.function(_child_compute, relation='mrp.bom', string="BoM Hierarchy", type='many2many'),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
def _get_uom_id(self, cr, uid, *args):
return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]
_defaults = {
'active': lambda *a: 1,
'product_qty': lambda *a: 1.0,
'product_efficiency': lambda *a: 1.0,
'product_rounding': lambda *a: 0.0,
'type': lambda *a: 'normal',
'product_uom': _get_uom_id,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.bom', context=c),
}
_order = "sequence"
def _bom_find(self, cr, uid, product_uom, product_tmpl_id=None, product_id=None, properties=None):
""" Finds BoM for particular product and product uom.
@param product_tmpl_id: Selected product.
@param product_uom: Unit of measure of a product.
@param properties: List of related properties.
@return: False or BoM id.
"""
if properties is None:
properties = []
domain = None
if product_id:
domain = ['|',('product_id', '=', product_id),('product_tmpl_id.product_variant_ids', '=', product_id)]
else:
domain = [('product_id', '=', False), ('product_tmpl_id', '=', product_tmpl_id)]
if product_uom:
domain += [('product_uom','=',product_uom)]
domain = domain + [ '|', ('date_start', '=', False), ('date_start', '<=', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
'|', ('date_stop', '=', False), ('date_stop', '>=', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
ids = self.search(cr, uid, domain)
for bom in self.pool.get('mrp.bom').browse(cr, uid, ids):
if not set(map(int,bom.property_ids or [])) - set(properties or []):
return bom.id
return False
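# Worked example (comment only, hypothetical ids): for product_id=42 and
# product_uom=1 the domain assembled above is, in prefix '|' notation,
#   ['|', ('product_id', '=', 42),
#         ('product_tmpl_id.product_variant_ids', '=', 42),
#    ('product_uom', '=', 1),
#    '|', ('date_start', '=', False), ('date_start', '<=', <now>),
#    '|', ('date_stop', '=', False), ('date_stop', '>=', <now>)]
# i.e. a variant-specific or template-level BoM in that UoM, valid today;
# the first match whose property_ids are a subset of `properties` is returned.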
def _bom_explode(self, cr, uid, bom, product, factor, properties=None, level=0, routing_id=False, previous_products=None, master_bom=None):
""" Finds Products and Work Centers for related BoM for manufacturing order.
@param bom: BoM of particular product template.
@param product: Select a particular variant of the BoM. If False use BoM without variants.
@param factor: Factor of product UoM.
@param properties: A List of properties Ids.
@param level: Depth level of the BoM explosion; incremented by 10 on each recursion.
@param previous_products: List of products already used by the BoM explosion, to avoid recursion
@param master_bom: On recursion, used to display the name of the master BoM
@return: result: List of dictionaries containing product details.
result2: List of dictionaries containing Work Center details.
"""
routing_obj = self.pool.get('mrp.routing')
all_prod = [] + (previous_products or [])
master_bom = master_bom or bom
def _factor(factor, product_efficiency, product_rounding):
factor = factor / (product_efficiency or 1.0)
factor = _common.ceiling(factor, product_rounding)
if factor < product_rounding:
factor = product_rounding
return factor
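# Worked example (comment only, illustrative numbers): with factor=2.5,
# product_efficiency=0.8 and product_rounding=1.0, _factor computes
# 2.5 / 0.8 = 3.125, which _common.ceiling() is assumed here to round up to
# the next multiple of the rounding step, giving 4.0; the final floor at
# product_rounding then leaves 4.0 unchanged.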
factor = _factor(factor, bom.product_efficiency, bom.product_rounding)
result = []
result2 = []
routing = (routing_id and routing_obj.browse(cr, uid, routing_id)) or bom.routing_id or False
if routing:
for wc_use in routing.workcenter_lines:
wc = wc_use.workcenter_id
d, m = divmod(factor, wc_use.workcenter_id.capacity_per_cycle)
mult = (d + (m and 1.0 or 0.0))
cycle = mult * wc_use.cycle_nbr
result2.append({
'name': tools.ustr(wc_use.name) + ' - ' + tools.ustr(bom.product_tmpl_id.name_get()[0][1]),
'workcenter_id': wc.id,
'sequence': level + (wc_use.sequence or 0),
'cycle': cycle,
'hour': float(wc_use.hour_nbr * mult + ((wc.time_start or 0.0) + (wc.time_stop or 0.0) + cycle * (wc.time_cycle or 0.0)) * (wc.time_efficiency or 1.0)),
})
for bom_line_id in bom.bom_line_ids:
if bom_line_id.date_start and bom_line_id.date_start > time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) or \
bom_line_id.date_stop and bom_line_id.date_stop > time.strftime(DEFAULT_SERVER_DATETIME_FORMAT):
continue
# check properties
if set(map(int,bom_line_id.property_ids or [])) - set(properties or []):
continue
# all bom_line_id variant values must be in the product
if bom_line_id.attribute_value_ids:
if not product or (set(map(int,bom_line_id.attribute_value_ids or [])) - set(map(int,product.attribute_value_ids))):
continue
if bom_line_id.product_id.id in all_prod:
raise osv.except_osv(_('Invalid Action!'), _('BoM "%s" contains a BoM line with a product recursion: "%s".') % (master_bom.name,bom_line_id.product_id.name_get()[0][1]))
all_prod.append(bom_line_id.product_id.id)
if bom_line_id.type != "phantom":
result.append({
'name': bom_line_id.product_id.name,
'product_id': bom_line_id.product_id.id,
'product_qty': _factor(bom_line_id.product_qty * factor, bom_line_id.product_efficiency, bom_line_id.product_rounding),
'product_uom': bom_line_id.product_uom.id,
'product_uos_qty': bom_line_id.product_uos and bom_line_id.product_uos_qty * factor or False,
'product_uos': bom_line_id.product_uos and bom_line_id.product_uos.id or False,
})
else:
bom_id = self._bom_find(cr, uid, bom_line_id.product_uom.id, product_id=bom_line_id.product_id.id, properties=properties)
if bom_id:
bom2 = self.browse(cr, uid, bom_id)
res = self._bom_explode(cr, uid, bom2, bom_line_id.product_id, factor,
properties=properties, level=level + 10, previous_products=all_prod, master_bom=master_bom)
result = result + res[0]
result2 = result2 + res[1]
else:
raise osv.except_osv(_('Invalid Action!'), _('BoM "%s" contains a phantom BoM line but the product "%s" don\'t have any BoM defined.') % (master_bom.name,bom_line_id.product_id.name_get()[0][1]))
return result, result2
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
bom_data = self.read(cr, uid, id, [], context=context)
default.update(name=_("%s (copy)") % (bom_data['name']))
return super(mrp_bom, self).copy_data(cr, uid, id, default, context=context)
def onchange_uom(self, cr, uid, ids, product_tmpl_id, product_uom, context=None):
res = {'value': {}}
if not product_uom or not product_tmpl_id:
return res
product = self.pool.get('product.template').browse(cr, uid, product_tmpl_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
def onchange_product_tmpl_id(self, cr, uid, ids, product_tmpl_id, product_qty=0, context=None):
""" Changes UoM and name if product_id changes.
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
res = {}
if product_tmpl_id:
prod = self.pool.get('product.template').browse(cr, uid, product_tmpl_id, context=context)
res['value'] = {
'name': prod.name,
'product_uom': prod.uom_id.id,
}
return res
class mrp_bom_line(osv.osv):
_name = 'mrp.bom.line'
_order = "sequence"
_columns = {
'type': fields.selection([('normal', 'Normal'), ('phantom', 'Phantom')], 'BoM Line Type', required=True,
help="Phantom: this product line will not appear in the raw materials of manufacturing orders,"
"it will be directly replaced by the raw materials of its own BoM, without triggering"
"an extra manufacturing order."),
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_uos_qty': fields.float('Product UOS Qty'),
'product_uos': fields.many2one('product.uom', 'Product UOS', help="Product UOS (Unit of Sale) is the unit of measurement for the invoicing and promotion of stock."),
'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True,
help="Unit of Measure (Unit of Measure) is the unit of measurement for the inventory control"),
'date_start': fields.date('Valid From', help="Validity of component. Keep empty if it's always valid."),
'date_stop': fields.date('Valid Until', help="Validity of component. Keep empty if it's always valid."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying."),
'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."),
'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% within the production process."),
'property_ids': fields.many2many('mrp.property', string='Properties'),
'bom_id': fields.many2one('mrp.bom', 'Parent BoM', ondelete='cascade', select=True, required=True),
'attribute_value_ids': fields.many2many('product.attribute.value', string='Variants', help="BoM Product Variants needed to apply this line."),
}
def _get_uom_id(self, cr, uid, *args):
return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]
_defaults = {
'product_qty': lambda *a: 1.0,
'product_efficiency': lambda *a: 1.0,
'product_rounding': lambda *a: 0.0,
'type': lambda *a: 'normal',
'product_uom': _get_uom_id,
}
_sql_constraints = [
('bom_qty_zero', 'CHECK (product_qty>0)', 'All product quantities must be greater than 0.\n' \
'You should install the mrp_byproduct module if you want to manage extra products on BoMs !'),
]
def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value': {}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
def onchange_product_id(self, cr, uid, ids, product_id, product_qty=0, context=None):
""" Changes UoM if product_id changes.
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
res = {}
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
res['value'] = {
'product_uom': prod.uom_id.id,
'product_uos_qty': 0,
'product_uos': False
}
if prod.uos_id.id:
res['value']['product_uos_qty'] = product_qty * prod.uos_coeff
res['value']['product_uos'] = prod.uos_id.id
return res
class mrp_production(osv.osv):
"""
Production Orders / Manufacturing Orders
"""
_name = 'mrp.production'
_description = 'Manufacturing Order'
_date_name = 'date_planned'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _production_calc(self, cr, uid, ids, prop, unknow_none, context=None):
""" Calculates total hours and total no. of cycles for a production order.
@param prop: Name of field.
@param unknow_none:
@return: Dictionary of values.
"""
result = {}
for prod in self.browse(cr, uid, ids, context=context):
result[prod.id] = {
'hour_total': 0.0,
'cycle_total': 0.0,
}
for wc in prod.workcenter_lines:
result[prod.id]['hour_total'] += wc.hour
result[prod.id]['cycle_total'] += wc.cycle
return result
def _src_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _dest_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _get_progress(self, cr, uid, ids, name, arg, context=None):
""" Return product quantity percentage """
result = dict.fromkeys(ids, 100)
for mrp_production in self.browse(cr, uid, ids, context=context):
if mrp_production.product_qty:
done = 0.0
for move in mrp_production.move_created_ids2:
if not move.scrapped and move.product_id == mrp_production.product_id:
done += move.product_qty
result[mrp_production.id] = done / mrp_production.product_qty * 100
return result
def _moves_assigned(self, cr, uid, ids, name, arg, context=None):
""" Test whether all the consume lines are assigned """
res = {}
for production in self.browse(cr, uid, ids, context=context):
res[production.id] = True
states = [x.state != 'assigned' for x in production.move_lines if x]
if any(states) or len(states) == 0:
res[production.id] = False
return res
def _mrp_from_move(self, cr, uid, ids, context=None):
""" Return mrp"""
res = []
for move in self.browse(cr, uid, ids, context=context):
res += self.pool.get("mrp.production").search(cr, uid, [('move_lines', 'in', move.id)], context=context)
return res
_columns = {
'name': fields.char('Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'origin': fields.char('Source Document', readonly=True, states={'draft': [('readonly', False)]},
help="Reference of the document that generated this production order request.", copy=False),
'priority': fields.selection([('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')], 'Priority',
select=True, readonly=True, states=dict.fromkeys(['draft', 'confirmed'], [('readonly', False)])),
'product_id': fields.many2one('product.product', 'Product', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uos_qty': fields.float('Product UoS Quantity', readonly=True, states={'draft': [('readonly', False)]}),
'product_uos': fields.many2one('product.uom', 'Product UoS', readonly=True, states={'draft': [('readonly', False)]}),
'progress': fields.function(_get_progress, type='float',
string='Production progress'),
'location_src_id': fields.many2one('stock.location', 'Raw Materials Location', required=True,
readonly=True, states={'draft': [('readonly', False)]},
help="Location where the system will look for components."),
'location_dest_id': fields.many2one('stock.location', 'Finished Products Location', required=True,
readonly=True, states={'draft': [('readonly', False)]},
help="Location where the system will stock the finished products."),
'date_planned': fields.datetime('Scheduled Date', required=True, select=1, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'date_start': fields.datetime('Start Date', select=True, readonly=True, copy=False),
'date_finished': fields.datetime('End Date', select=True, readonly=True, copy=False),
'bom_id': fields.many2one('mrp.bom', 'Bill of Material', readonly=True, states={'draft': [('readonly', False)]},
help="Bill of Materials allow you to define the list of required raw materials to make a finished product."),
'routing_id': fields.many2one('mrp.routing', string='Routing', ondelete='set null', readonly=True, states={'draft': [('readonly', False)]},
help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
'move_prod_id': fields.many2one('stock.move', 'Product Move', readonly=True, copy=False),
'move_lines': fields.one2many('stock.move', 'raw_material_production_id', 'Products to Consume',
domain=[('state', 'not in', ('done', 'cancel'))], readonly=True, states={'draft': [('readonly', False)]}),
'move_lines2': fields.one2many('stock.move', 'raw_material_production_id', 'Consumed Products',
domain=[('state', 'in', ('done', 'cancel'))], readonly=True),
'move_created_ids': fields.one2many('stock.move', 'production_id', 'Products to Produce',
domain=[('state', 'not in', ('done', 'cancel'))], readonly=True),
'move_created_ids2': fields.one2many('stock.move', 'production_id', 'Produced Products',
domain=[('state', 'in', ('done', 'cancel'))], readonly=True),
'product_lines': fields.one2many('mrp.production.product.line', 'production_id', 'Scheduled goods',
readonly=True),
'workcenter_lines': fields.one2many('mrp.production.workcenter.line', 'production_id', 'Work Centers Utilisation',
readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection(
[('draft', 'New'), ('cancel', 'Cancelled'), ('confirmed', 'Awaiting Raw Materials'),
('ready', 'Ready to Produce'), ('in_production', 'Production Started'), ('done', 'Done')],
string='Status', readonly=True,
track_visibility='onchange', copy=False,
help="When the production order is created the status is set to 'Draft'.\n\
If the order is confirmed the status is set to 'Waiting Goods'.\n\
If any exceptions are there, the status is set to 'Picking Exception'.\n\
If the stock is available then the status is set to 'Ready to Produce'.\n\
When the production gets started then the status is set to 'In Production'.\n\
When the production is over, the status is set to 'Done'."),
'hour_total': fields.function(_production_calc, type='float', string='Total Hours', multi='workorder', store=True),
'cycle_total': fields.function(_production_calc, type='float', string='Total Cycles', multi='workorder', store=True),
'user_id': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'ready_production': fields.function(_moves_assigned, type='boolean', store={'stock.move': (_mrp_from_move, ['state'], 10)}),
}
_defaults = {
'priority': lambda *a: '1',
'state': lambda *a: 'draft',
'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'product_qty': lambda *a: 1.0,
'user_id': lambda self, cr, uid, c: uid,
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'mrp.production') or '/',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.production', context=c),
'location_src_id': _src_id_default,
'location_dest_id': _dest_id_default
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
]
_order = 'priority desc, date_planned asc'
def _check_qty(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
if order.product_qty <= 0:
return False
return True
_constraints = [
(_check_qty, 'Order quantity cannot be negative or zero!', ['product_qty']),
]
def unlink(self, cr, uid, ids, context=None):
for production in self.browse(cr, uid, ids, context=context):
if production.state not in ('draft', 'cancel'):
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a manufacturing order in state \'%s\'.') % production.state)
return super(mrp_production, self).unlink(cr, uid, ids, context=context)
def location_id_change(self, cr, uid, ids, src, dest, context=None):
""" Changes destination location if source location is changed.
@param src: Source location id.
@param dest: Destination location id.
@return: Dictionary of values.
"""
if dest:
return {}
if src:
return {'value': {'location_dest_id': src}}
return {}
def product_id_change(self, cr, uid, ids, product_id, product_qty=0, context=None):
""" Finds UoM of changed product.
@param product_id: Id of changed product.
@return: Dictionary of values.
"""
result = {}
if not product_id:
return {'value': {
'product_uom': False,
'bom_id': False,
'routing_id': False,
'product_uos_qty': 0,
'product_uos': False
}}
bom_obj = self.pool.get('mrp.bom')
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
bom_id = bom_obj._bom_find(cr, uid, product.uom_id and product.uom_id.id, product_id=product.id, properties=[])
routing_id = False
if bom_id:
bom_point = bom_obj.browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
product_uom_id = product.uom_id and product.uom_id.id or False
result['value'] = {'product_uos_qty': 0, 'product_uos': False, 'product_uom': product_uom_id, 'bom_id': bom_id, 'routing_id': routing_id}
if product.uos_id.id:
result['value']['product_uos_qty'] = product_qty * product.uos_coeff
result['value']['product_uos'] = product.uos_id.id
return result
def bom_id_change(self, cr, uid, ids, bom_id, context=None):
""" Finds routing for changed BoM.
@param bom_id: Id of the changed BoM.
@return: Dictionary of values.
"""
if not bom_id:
return {'value': {
'routing_id': False
}}
bom_point = self.pool.get('mrp.bom').browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
result = {
'routing_id': routing_id
}
return {'value': result}
def _action_compute_lines(self, cr, uid, ids, properties=None, context=None):
""" Compute product_lines and workcenter_lines from BoM structure
@return: product_lines
"""
if properties is None:
properties = []
results = []
bom_obj = self.pool.get('mrp.bom')
uom_obj = self.pool.get('product.uom')
prod_line_obj = self.pool.get('mrp.production.product.line')
workcenter_line_obj = self.pool.get('mrp.production.workcenter.line')
for production in self.browse(cr, uid, ids, context=context):
#unlink product_lines
prod_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.product_lines], context=context)
#unlink workcenter_lines
workcenter_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.workcenter_lines], context=context)
# search BoM structure and route
bom_point = production.bom_id
bom_id = production.bom_id.id
if not bom_point:
bom_id = bom_obj._bom_find(cr, uid, production.product_uom.id, product_id=production.product_id.id, properties=properties)
if bom_id:
bom_point = bom_obj.browse(cr, uid, bom_id)
routing_id = bom_point.routing_id.id or False
self.write(cr, uid, [production.id], {'bom_id': bom_id, 'routing_id': routing_id})
if not bom_id:
raise osv.except_osv(_('Error!'), _("Cannot find a bill of material for this product."))
# get components and workcenter_lines from BoM structure
factor = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, bom_point.product_uom.id)
# product_lines, workcenter_lines
results, results2 = bom_obj._bom_explode(cr, uid, bom_point, production.product_id, factor / bom_point.product_qty, properties, routing_id=production.routing_id.id)
# reset product_lines in production order
for line in results:
line['production_id'] = production.id
prod_line_obj.create(cr, uid, line)
#reset workcenter_lines in production order
for line in results2:
line['production_id'] = production.id
workcenter_line_obj.create(cr, uid, line)
return results
def action_compute(self, cr, uid, ids, properties=None, context=None):
""" Computes bills of material of a product.
@param properties: List containing dictionaries of properties.
@return: No. of products.
"""
return len(self._action_compute_lines(cr, uid, ids, properties=properties, context=context))
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the production order and related stock moves.
@return: True
"""
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
for production in self.browse(cr, uid, ids, context=context):
if production.move_created_ids:
move_obj.action_cancel(cr, uid, [x.id for x in production.move_created_ids])
move_obj.action_cancel(cr, uid, [x.id for x in production.move_lines])
self.write(cr, uid, ids, {'state': 'cancel'})
# Put related procurements in exception
proc_obj = self.pool.get("procurement.order")
procs = proc_obj.search(cr, uid, [('production_id', 'in', ids)], context=context)
if procs:
proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
return True
def action_ready(self, cr, uid, ids, context=None):
""" Changes the production state to Ready and location id of stock move.
@return: True
"""
move_obj = self.pool.get('stock.move')
self.write(cr, uid, ids, {'state': 'ready'})
for production in self.browse(cr, uid, ids, context=context):
if not production.move_created_ids:
self._make_production_produce_line(cr, uid, production, context=context)
if production.move_prod_id and production.move_prod_id.location_id.id != production.location_dest_id.id:
move_obj.write(cr, uid, [production.move_prod_id.id],
{'location_id': production.location_dest_id.id})
return True
def action_production_end(self, cr, uid, ids, context=None):
""" Changes production state to Finish and writes finished date.
@return: True
"""
for production in self.browse(cr, uid, ids):
self._costs_generate(cr, uid, production)
write_res = self.write(cr, uid, ids, {'state': 'done', 'date_finished': time.strftime('%Y-%m-%d %H:%M:%S')})
# Check related procurements
proc_obj = self.pool.get("procurement.order")
procs = proc_obj.search(cr, uid, [('production_id', 'in', ids)], context=context)
proc_obj.check(cr, uid, procs, context=context)
return write_res
def test_production_done(self, cr, uid, ids):
""" Tests whether production is done or not.
@return: True or False
"""
res = True
for production in self.browse(cr, uid, ids):
if production.move_lines:
res = False
if production.move_created_ids:
res = False
return res
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
""" Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but if the
module mrp_subproduct is installed, then we must use the move_id to identify the product to produce
and its quantity.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Will be used in mrp_subproduct.
:return: The factor to apply to the quantity that we should produce for the given production order.
"""
return 1
def _get_produced_qty(self, cr, uid, production, context=None):
''' returns the produced quantity of product 'production.product_id' for the given production, in the product UoM
'''
produced_qty = 0
for produced_product in production.move_created_ids2:
if (produced_product.scrapped) or (produced_product.product_id.id != production.product_id.id):
continue
produced_qty += produced_product.product_qty
return produced_qty
def _get_consumed_data(self, cr, uid, production, context=None):
''' returns a dictionary containing for each raw material of the given production, its quantity already consumed (in the raw material UoM)
'''
consumed_data = {}
# Calculate already consumed qtys
for consumed in production.move_lines2:
if consumed.scrapped:
continue
if not consumed_data.get(consumed.product_id.id, False):
consumed_data[consumed.product_id.id] = 0
consumed_data[consumed.product_id.id] += consumed.product_qty
return consumed_data
def _calculate_qty(self, cr, uid, production, product_qty=0.0, context=None):
"""
Calculates the quantity still needed to produce an extra number of products
"""
quant_obj = self.pool.get("stock.quant")
produced_qty = self._get_produced_qty(cr, uid, production, context=context)
consumed_data = self._get_consumed_data(cr, uid, production, context=context)
#In case no product_qty is given, take the remaining qty to produce for the given production
if not product_qty:
product_qty = production.product_qty - produced_qty
dicts = {}
# Find product qty to be consumed and consume it
for scheduled in production.product_lines:
product_id = scheduled.product_id.id
consumed_qty = consumed_data.get(product_id, 0.0)
# qty available for consume and produce
qty_avail = scheduled.product_qty - consumed_qty
if qty_avail <= 0.0:
# there will be nothing to consume for this raw material
continue
if not dicts.get(product_id):
dicts[product_id] = {}
# total qty of consumed product we need after this consumption
total_consume = ((product_qty + produced_qty) * scheduled.product_qty / production.product_qty)
qty = total_consume - consumed_qty
# Search for quants related to this related move
for move in production.move_lines:
if qty <= 0.0:
break
if move.product_id.id != product_id:
continue
q = min(move.product_qty, qty)
quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, scheduled.product_id, q, domain=[('qty', '>', 0.0)],
prefered_domain_list=[[('reservation_id', '=', move.id)], [('reservation_id', '=', False)]], context=context)
for quant, quant_qty in quants:
if quant:
lot_id = quant.lot_id.id
if not product_id in dicts.keys():
dicts[product_id] = {lot_id: quant_qty}
elif lot_id in dicts[product_id].keys():
dicts[product_id][lot_id] += quant_qty
else:
dicts[product_id][lot_id] = quant_qty
qty -= quant_qty
if qty > 0:
if dicts[product_id].get(False):
dicts[product_id][False] += qty
else:
dicts[product_id][False] = qty
consume_lines = []
for prod in dicts.keys():
for lot, qty in dicts[prod].items():
consume_lines.append({'product_id': prod, 'product_qty': qty, 'lot_id': lot})
return consume_lines
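# Illustrative result shape (comment only, hypothetical ids and quantities):
# consume_lines is a flat list such as
#   [{'product_id': 42, 'product_qty': 3.0, 'lot_id': 7},
#    {'product_id': 42, 'product_qty': 1.0, 'lot_id': False}]
# i.e. one entry per (raw material, lot) pair, with lot_id False for any
# quantity that could not be matched to reserved quants.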
def action_produce(self, cr, uid, production_id, production_qty, production_mode, wiz=False, context=None):
""" To produce final product based on production mode (consume/consume&produce).
If Production mode is consume, all stock move lines of raw materials will be done/consumed.
If Production mode is consume & produce, all stock move lines of raw materials will be done/consumed
and stock move lines of final product will be also done/produced.
@param production_id: the ID of mrp.production object
@param production_qty: specify qty to produce
@param production_mode: specify production mode (consume/consume&produce).
@param wiz: the mrp produce product wizard, which will tell the amount of consumed products needed
@return: True
"""
stock_mov_obj = self.pool.get('stock.move')
production = self.browse(cr, uid, production_id, context=context)
if not production.move_lines and production.state == 'ready':
            # trigger workflow if there are no products to consume (e.g. services)
self.signal_workflow(cr, uid, [production_id], 'button_produce')
produced_qty = self._get_produced_qty(cr, uid, production, context=context)
main_production_move = False
if production_mode == 'consume_produce':
# To produce remaining qty of final product
#vals = {'state':'confirmed'}
#final_product_todo = [x.id for x in production.move_created_ids]
#stock_mov_obj.write(cr, uid, final_product_todo, vals)
#stock_mov_obj.action_confirm(cr, uid, final_product_todo, context)
produced_products = {}
for produced_product in production.move_created_ids2:
if produced_product.scrapped:
continue
if not produced_products.get(produced_product.product_id.id, False):
produced_products[produced_product.product_id.id] = 0
produced_products[produced_product.product_id.id] += produced_product.product_qty
for produce_product in production.move_created_ids:
produced_qty = produced_products.get(produce_product.product_id.id, 0)
subproduct_factor = self._get_subproduct_factor(cr, uid, production.id, produce_product.id, context=context)
rest_qty = (subproduct_factor * production.product_qty) - produced_qty
if float_compare(rest_qty, (subproduct_factor * production_qty), precision_rounding=produce_product.product_id.uom_id.rounding) < 0:
prod_name = produce_product.product_id.name_get()[0][1]
raise osv.except_osv(_('Warning!'), _('You are going to produce total %s quantities of "%s".\nBut you can only produce up to total %s quantities.') % ((subproduct_factor * production_qty), prod_name, rest_qty))
if float_compare(rest_qty, 0, precision_rounding=produce_product.product_id.uom_id.rounding) > 0:
lot_id = False
if wiz:
lot_id = wiz.lot_id.id
new_moves = stock_mov_obj.action_consume(cr, uid, [produce_product.id], (subproduct_factor * production_qty), location_id=produce_product.location_id.id, restrict_lot_id=lot_id, context=context)
stock_mov_obj.write(cr, uid, new_moves, {'production_id': production_id}, context=context)
if produce_product.product_id.id == production.product_id.id and new_moves:
main_production_move = new_moves[0]
if production_mode in ['consume', 'consume_produce']:
if wiz:
consume_lines = []
for cons in wiz.consume_lines:
consume_lines.append({'product_id': cons.product_id.id, 'lot_id': cons.lot_id.id, 'product_qty': cons.product_qty})
else:
consume_lines = self._calculate_qty(cr, uid, production, production_qty, context=context)
for consume in consume_lines:
remaining_qty = consume['product_qty']
for raw_material_line in production.move_lines:
if remaining_qty <= 0:
break
if consume['product_id'] != raw_material_line.product_id.id:
continue
consumed_qty = min(remaining_qty, raw_material_line.product_qty)
stock_mov_obj.action_consume(cr, uid, [raw_material_line.id], consumed_qty, raw_material_line.location_id.id, restrict_lot_id=consume['lot_id'], consumed_for=main_production_move, context=context)
remaining_qty -= consumed_qty
if remaining_qty:
#consumed more in wizard than previously planned
product = self.pool.get('product.product').browse(cr, uid, consume['product_id'], context=context)
extra_move_id = self._make_consume_line_from_data(cr, uid, production, product, product.uom_id.id, remaining_qty, False, 0, context=context)
if extra_move_id:
stock_mov_obj.action_done(cr, uid, [extra_move_id], context=context)
self.message_post(cr, uid, production_id, body=_("%s produced") % self._description, context=context)
self.signal_workflow(cr, uid, [production_id], 'button_produce_done')
return True
def _costs_generate(self, cr, uid, production):
""" Calculates total costs at the end of the production.
@param production: Id of production order.
@return: Calculated amount.
"""
amount = 0.0
analytic_line_obj = self.pool.get('account.analytic.line')
for wc_line in production.workcenter_lines:
wc = wc_line.workcenter_id
if wc.costs_journal_id and wc.costs_general_account_id:
# Cost per hour
value = wc_line.hour * wc.costs_hour
account = wc.costs_hour_account_id.id
if value and account:
amount += value
                    # we use SUPERUSER_ID as we do not guarantee an mrp user
# has access to account analytic lines but still should be
# able to produce orders
analytic_line_obj.create(cr, SUPERUSER_ID, {
'name': wc_line.name + ' (H)',
'amount': value,
'account_id': account,
'general_account_id': wc.costs_general_account_id.id,
'journal_id': wc.costs_journal_id.id,
'ref': wc.code,
'product_id': wc.product_id.id,
'unit_amount': wc_line.hour,
'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
})
# Cost per cycle
value = wc_line.cycle * wc.costs_cycle
account = wc.costs_cycle_account_id.id
if value and account:
amount += value
analytic_line_obj.create(cr, SUPERUSER_ID, {
'name': wc_line.name + ' (C)',
'amount': value,
'account_id': account,
'general_account_id': wc.costs_general_account_id.id,
'journal_id': wc.costs_journal_id.id,
'ref': wc.code,
'product_id': wc.product_id.id,
'unit_amount': wc_line.cycle,
'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
})
return amount
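    # Worked example (illustration only, not part of the original module): a work center
    # with costs_hour = 30.0 and costs_cycle = 5.0, used for 2.0 hours and 4.0 cycles,
    # yields one analytic line of 60.0 for the hours and one of 20.0 for the cycles, and
    # the method returns amount = 80.0, provided the work center has an analytic journal,
    # a general account and the hour/cycle cost accounts configured.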
def action_in_production(self, cr, uid, ids, context=None):
""" Changes state to In Production and writes starting date.
@return: True
"""
return self.write(cr, uid, ids, {'state': 'in_production', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')})
def consume_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
res += [x.id for x in order.move_lines]
return res
def test_ready(self, cr, uid, ids):
res = False
for production in self.browse(cr, uid, ids):
if production.ready_production:
res = True
return res
def _make_production_produce_line(self, cr, uid, production, context=None):
stock_move = self.pool.get('stock.move')
source_location_id = production.product_id.property_stock_production.id
destination_location_id = production.location_dest_id.id
data = {
'name': production.name,
'date': production.date_planned,
'product_id': production.product_id.id,
'product_uom': production.product_uom.id,
'product_uom_qty': production.product_qty,
'product_uos_qty': production.product_uos and production.product_uos_qty or False,
'product_uos': production.product_uos and production.product_uos.id or False,
'location_id': source_location_id,
'location_dest_id': destination_location_id,
'move_dest_id': production.move_prod_id.id,
'company_id': production.company_id.id,
'production_id': production.id,
'origin': production.name,
}
move_id = stock_move.create(cr, uid, data, context=context)
#a phantom bom cannot be used in mrp order so it's ok to assume the list returned by action_confirm
#is 1 element long, so we can take the first.
return stock_move.action_confirm(cr, uid, [move_id], context=context)[0]
def _get_raw_material_procure_method(self, cr, uid, product, context=None):
'''This method returns the procure_method to use when creating the stock move for the production raw materials'''
try:
mto_route = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'route_warehouse0_mto')[1]
except:
return "make_to_stock"
routes = product.route_ids + product.categ_id.total_route_ids
if mto_route in [x.id for x in routes]:
return "make_to_order"
return "make_to_stock"
def _create_previous_move(self, cr, uid, move_id, product, source_location_id, dest_location_id, context=None):
'''
When the routing gives a different location than the raw material location of the production order,
we should create an extra move from the raw material location to the location of the routing, which
precedes the consumption line (chained)
'''
stock_move = self.pool.get('stock.move')
move = stock_move.copy(cr, uid, move_id, default = {
'location_id': source_location_id,
'location_dest_id': dest_location_id,
'procure_method': self._get_raw_material_procure_method(cr, uid, product, context=context),
'raw_material_production_id': False,
'move_dest_id': move_id,
}, context=context)
return move
def _make_consume_line_from_data(self, cr, uid, production, product, uom_id, qty, uos_id, uos_qty, context=None):
stock_move = self.pool.get('stock.move')
        # Internal shipment is created for Stockable and Consumable products
if product.type not in ('product', 'consu'):
return False
# Take routing location as a Source Location.
source_location_id = production.location_src_id.id
prod_location_id = source_location_id
prev_move= False
if production.bom_id.routing_id and production.bom_id.routing_id.location_id and production.bom_id.routing_id.location_id.id != source_location_id:
source_location_id = production.bom_id.routing_id.location_id.id
prev_move = True
destination_location_id = production.product_id.property_stock_production.id
move_id = stock_move.create(cr, uid, {
'name': production.name,
'date': production.date_planned,
'product_id': product.id,
'product_uom_qty': qty,
'product_uom': uom_id,
'product_uos_qty': uos_id and uos_qty or False,
'product_uos': uos_id or False,
'location_id': source_location_id,
'location_dest_id': destination_location_id,
'company_id': production.company_id.id,
'procure_method': prev_move and 'make_to_stock' or self._get_raw_material_procure_method(cr, uid, product, context=context), #Make_to_stock avoids creating procurement
'raw_material_production_id': production.id,
#this saves us a browse in create()
'price_unit': product.standard_price,
'origin': production.name,
}, context=context)
if prev_move:
prev_move = self._create_previous_move(cr, uid, move_id, product, prod_location_id, source_location_id, context=context)
return move_id
def _make_production_consume_line(self, cr, uid, line, context=None):
return self._make_consume_line_from_data(cr, uid, line.production_id, line.product_id, line.product_uom.id, line.product_qty, line.product_uos.id, line.product_uos_qty, context=context)
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order.
@return: Newly generated Shipment Id.
"""
uncompute_ids = filter(lambda x: x, [not x.product_lines and x.id or False for x in self.browse(cr, uid, ids, context=context)])
self.action_compute(cr, uid, uncompute_ids, context=context)
for production in self.browse(cr, uid, ids, context=context):
self._make_production_produce_line(cr, uid, production, context=context)
stock_moves = []
for line in production.product_lines:
stock_move_id = self._make_production_consume_line(cr, uid, line, context=context)
if stock_move_id:
stock_moves.append(stock_move_id)
if stock_moves:
self.pool.get('stock.move').action_confirm(cr, uid, stock_moves, context=context)
production.write({'state': 'confirmed'}, context=context)
return 0
def action_assign(self, cr, uid, ids, context=None):
"""
Checks the availability on the consume lines of the production order
"""
move_obj = self.pool.get("stock.move")
for production in self.browse(cr, uid, ids, context=context):
move_obj.action_assign(cr, uid, [x.id for x in production.move_lines], context=context)
def force_production(self, cr, uid, ids, *args):
""" Assigns products.
@param *args: Arguments
@return: True
"""
move_obj = self.pool.get('stock.move')
for order in self.browse(cr, uid, ids):
move_obj.force_assign(cr, uid, [x.id for x in order.move_lines])
return True
class mrp_production_workcenter_line(osv.osv):
_name = 'mrp.production.workcenter.line'
_description = 'Work Order'
_order = 'sequence'
_inherit = ['mail.thread']
_columns = {
'name': fields.char('Work Order', required=True),
'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
'cycle': fields.float('Number of Cycles', digits=(16, 2)),
'hour': fields.float('Number of Hours', digits=(16, 2)),
'sequence': fields.integer('Sequence', required=True, help="Gives the sequence order when displaying a list of work orders."),
'production_id': fields.many2one('mrp.production', 'Manufacturing Order',
track_visibility='onchange', select=True, ondelete='cascade', required=True),
}
_defaults = {
'sequence': lambda *a: 1,
'hour': lambda *a: 0,
'cycle': lambda *a: 0,
}
class mrp_production_product_line(osv.osv):
_name = 'mrp.production.product.line'
_description = 'Production Scheduled Product'
_columns = {
'name': fields.char('Name', required=True),
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_uos_qty': fields.float('Product UOS Quantity'),
'product_uos': fields.many2one('product.uom', 'Product UOS'),
'production_id': fields.many2one('mrp.production', 'Production Order', select=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
leeseuljeong/leeseulstack_neutron
|
neutron/neutron_plugin_base_v2.py
|
17
|
14715
|
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
v2 Neutron Plug-in API specification.
:class:`NeutronPluginBaseV2` provides the definition of the minimum set of
methods that need to be implemented by a v2 Neutron Plug-in.
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class NeutronPluginBaseV2(object):
@abc.abstractmethod
def create_subnet(self, context, subnet):
"""Create a subnet.
Create a subnet, which represents a range of IP addresses
that can be allocated to devices
:param context: neutron api request context
:param subnet: dictionary describing the subnet, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abc.abstractmethod
def update_subnet(self, context, id, subnet):
"""Update values of a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to update.
:param subnet: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_subnet(self, context, id, fields=None):
"""Retrieve a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to fetch.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of subnets.
        The contents of the list depend on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
                        Values in this dictionary are an iterable containing
values that will be used for an exact match comparison
for that value. Each result returned by this
function will have matched one of the values for each
key in filters.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_subnets_count(self, context, filters=None):
"""Return the number of subnets.
The result depends on the identity of
the user making the request (as indicated by the context) as well as
any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will
have matched one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError()
@abc.abstractmethod
def delete_subnet(self, context, id):
"""Delete a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to delete.
"""
pass
@abc.abstractmethod
def create_network(self, context, network):
"""Create a network.
Create a network, which represents an L2 network segment which
can have a set of subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abc.abstractmethod
def update_network(self, context, id, network):
"""Update values of a network.
:param context: neutron api request context
:param id: UUID representing the network to update.
:param network: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_network(self, context, id, fields=None):
"""Retrieve a network.
:param context: neutron api request context
:param id: UUID representing the network to fetch.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of networks.
        The contents of the list depend on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_networks_count(self, context, filters=None):
"""Return the number of networks.
The result depends on the identity
of the user making the request (as indicated by the context) as well
as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. Values in
                        this dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will have
matched one of the values for each key in filters.
        .. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError()
@abc.abstractmethod
def delete_network(self, context, id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
"""
pass
@abc.abstractmethod
def create_port(self, context, port):
"""Create a port.
Create a port, which is a connection point of a device (e.g., a VM
NIC) to attach to a L2 neutron network.
:param context: neutron api request context
:param port: dictionary describing the port, with keys as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. All keys will be
populated.
"""
pass
@abc.abstractmethod
def update_port(self, context, id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_port(self, context, id, fields=None):
"""Retrieve a port.
:param context: neutron api request context
:param id: UUID representing the port to fetch.
:param fields: a list of strings that are valid keys in a port
dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of ports.
        The contents of the list depend on the identity of the user making
the request (as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a port as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`. Values
                        in this dictionary are an iterable containing values
that will be used for an exact match comparison for
that value. Each result returned by this function will
have matched one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
port dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_ports_count(self, context, filters=None):
"""Return the number of ports.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError()
@abc.abstractmethod
def delete_port(self, context, id):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
"""
pass
def start_rpc_listeners(self):
"""Start the RPC listeners.
Most plugins start RPC listeners implicitly on initialization. In
order to support multiple process RPC, the plugin needs to expose
control over when this is started.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError()
def rpc_workers_supported(self):
"""Return whether the plugin supports multiple RPC workers.
A plugin that supports multiple RPC workers should override the
start_rpc_listeners method to ensure that this method returns True and
that start_rpc_listeners is called at the appropriate time.
        Alternatively, a plugin can override this method to customize detection
        of support for multiple RPC workers.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
return (self.__class__.start_rpc_listeners !=
NeutronPluginBaseV2.start_rpc_listeners)
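# Illustrative sketch only; this helper is not part of the Neutron plug-in API.  It
# spells out the exact-match 'filters' and 'fields' projection semantics that the
# get_subnets / get_networks / get_ports docstrings above describe, using plain dicts
# in place of real resources.
def _example_filter_and_project(resources, filters=None, fields=None):
    """Return the resources matching 'filters', projected onto 'fields'."""
    result = []
    for resource in resources:
        if filters and not all(resource.get(key) in values
                               for key, values in filters.items()):
            continue
        if fields:
            resource = dict((key, resource[key]) for key in fields if key in resource)
        result.append(resource)
    return result
# Example: filtering [{'id': 'a', 'cidr': '10.0.0.0/24'}, {'id': 'b', 'cidr': '10.0.1.0/24'}]
# with filters={'cidr': ['10.0.1.0/24']} and fields=['id'] returns [{'id': 'b'}].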
|
apache-2.0
|
wooga/airflow
|
airflow/api/auth/backend/kerberos_auth.py
|
5
|
5597
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013, Michael Komitee
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Kerberos authentication module"""
import logging
import os
from functools import wraps
from socket import getfqdn
import kerberos
# noinspection PyProtectedMember
from flask import Response, _request_ctx_stack as stack, g, make_response, request # type: ignore
from requests_kerberos import HTTPKerberosAuth
from airflow.configuration import conf
log = logging.getLogger(__name__)
# pylint: disable=c-extension-no-member
CLIENT_AUTH = HTTPKerberosAuth(service='airflow')
class KerberosService: # pylint: disable=too-few-public-methods
"""Class to keep information about the Kerberos Service initialized """
def __init__(self):
self.service_name = None
# Stores currently initialized Kerberos Service
_KERBEROS_SERVICE = KerberosService()
def init_app(app):
"""Initializes application with kerberos"""
hostname = app.config.get('SERVER_NAME')
if not hostname:
hostname = getfqdn()
log.info("Kerberos: hostname %s", hostname)
service = 'airflow'
_KERBEROS_SERVICE.service_name = "{}@{}".format(service, hostname)
if 'KRB5_KTNAME' not in os.environ:
os.environ['KRB5_KTNAME'] = conf.get('kerberos', 'keytab')
try:
log.info("Kerberos init: %s %s", service, hostname)
principal = kerberos.getServerPrincipalDetails(service, hostname)
except kerberos.KrbError as err:
log.warning("Kerberos: %s", err)
else:
log.info("Kerberos API: server is %s", principal)
def _unauthorized():
"""
    Indicate that authorization is required.
    :return: a 401 response carrying a ``WWW-Authenticate: Negotiate`` header
"""
return Response("Unauthorized", 401, {"WWW-Authenticate": "Negotiate"})
def _forbidden():
return Response("Forbidden", 403)
def _gssapi_authenticate(token):
state = None
ctx = stack.top
try:
return_code, state = kerberos.authGSSServerInit(_KERBEROS_SERVICE.service_name)
if return_code != kerberos.AUTH_GSS_COMPLETE:
return None
return_code = kerberos.authGSSServerStep(state, token)
if return_code == kerberos.AUTH_GSS_COMPLETE:
ctx.kerberos_token = kerberos.authGSSServerResponse(state)
ctx.kerberos_user = kerberos.authGSSServerUserName(state)
return return_code
if return_code == kerberos.AUTH_GSS_CONTINUE:
return kerberos.AUTH_GSS_CONTINUE
return None
except kerberos.GSSError:
return None
finally:
if state:
kerberos.authGSSServerClean(state)
def requires_authentication(function):
"""Decorator for functions that require authentication with Kerberos"""
@wraps(function)
def decorated(*args, **kwargs):
header = request.headers.get("Authorization")
if header:
ctx = stack.top
token = ''.join(header.split()[1:])
return_code = _gssapi_authenticate(token)
if return_code == kerberos.AUTH_GSS_COMPLETE:
g.user = ctx.kerberos_user
response = function(*args, **kwargs)
response = make_response(response)
if ctx.kerberos_token is not None:
response.headers['WWW-Authenticate'] = ' '.join(['negotiate',
ctx.kerberos_token])
return response
if return_code != kerberos.AUTH_GSS_CONTINUE:
return _forbidden()
return _unauthorized()
return decorated
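# Usage sketch (illustration only, not part of this module; the view name is made up).
# A function wrapped with requires_authentication only runs after _gssapi_authenticate()
# has accepted the client's "Authorization: Negotiate <token>" header; otherwise the
# decorator answers with 401 (asking the client to negotiate) or 403.
@requires_authentication
def _example_protected_view():
    # g.user is set to the authenticated Kerberos principal by the decorator.
    return "Hello, %s" % g.user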
|
apache-2.0
|
patsissons/Flexget
|
flexget/plugins/plugin_spy_headers.py
|
23
|
4220
|
from __future__ import unicode_literals, division, absolute_import
import logging
import urllib2
import httplib
import socket
from flexget import plugin
from flexget.event import event
log = logging.getLogger('spy_headers')
class CustomHTTPConnection(httplib.HTTPConnection):
def __init__(self, *args, **kwargs):
httplib.HTTPConnection.__init__(self, *args, **kwargs)
self.stored_headers = []
def putheader(self, header, value):
self.stored_headers.append((header, value))
httplib.HTTPConnection.putheader(self, header, value)
class HTTPCaptureHeaderHandler(urllib2.AbstractHTTPHandler):
handler_order = 400
def http_open(self, req):
return self.do_open(CustomHTTPConnection, req)
http_request = urllib2.AbstractHTTPHandler.do_request_
https_request = urllib2.AbstractHTTPHandler.do_request_
https_open = http_open
def do_open(self, http_class, req):
# All code here lifted directly from the python library
host = req.get_host()
if not host:
from urllib2 import URLError
raise URLError('no host given')
h = http_class(host) # will parse host:port
h.set_debuglevel(self._debuglevel)
headers = dict(req.headers)
headers.update(req.unredirected_hdrs)
headers["Connection"] = "close"
headers = dict(
(name.title(), val) for name, val in headers.items())
try:
h.request(req.get_method(), req.get_selector(), req.data, headers)
r = h.getresponse()
except socket.error as err: # XXX what error?
raise urllib2.URLError(err)
r.recv = r.read
fp = socket._fileobject(r, close=True)
resp = urllib2.addinfourl(fp, r.msg, req.get_full_url())
resp.code = r.status
resp.msg = r.reason
# After this our custom code!
req.all_sent_headers = h.stored_headers
log.info('Request : %s' % req.get_full_url())
log.info('Response : %s (%s)' % (resp.code, resp.msg))
# log headers
log.info('-- Headers: --------------------------')
for sh in h.stored_headers:
log.info('%s: %s' % (sh[0], sh[1]))
log.info('--------------------------------------')
return resp
class PluginSpyHeaders(object):
"""
Logs all headers sent in http requests. Useful for resolving issues.
WARNING: At the moment this modifies requests somehow!
"""
schema = {'type': 'boolean'}
@staticmethod
def log_requests_headers(response, **kwargs):
log.info('Request : %s' % response.request.url)
log.info('Response : %s (%s)' % (response.status_code, response.reason))
log.info('-- Headers: --------------------------')
for header, value in response.request.headers.iteritems():
log.info('%s: %s' % (header, value))
log.info('--------------------------------------')
return response
def on_task_start(self, task, config):
if not config:
return
# Add our hook to the requests session
task.requests.hooks['response'].append(self.log_requests_headers)
# Backwards compatibility for plugins still using urllib
if urllib2._opener:
log.debug('Adding HTTPCaptureHeaderHandler to default opener')
urllib2._opener.add_handler(HTTPCaptureHeaderHandler())
else:
log.debug('Creating new opener and installing it')
opener = urllib2.build_opener(HTTPCaptureHeaderHandler())
urllib2.install_opener(opener)
def on_task_exit(self, task, config):
"""Task exiting, remove additions"""
if not config:
return
task.requests.hooks['response'].remove(self.log_requests_headers)
if urllib2._opener:
log.debug('Removing urllib2 default opener')
# TODO: this uninstalls all other handlers as well, but does it matter?
urllib2.install_opener(None)
# remove also on abort
on_task_abort = on_task_exit
@event('plugin.register')
def register_plugin():
plugin.register(PluginSpyHeaders, 'spy_headers', api_ver=2)
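# Illustrative configuration sketch (not part of this plugin file).  Since the schema is
# a plain boolean, a task would enable the plugin like this in the FlexGet config
# (the feed URL is made up):
#
#   tasks:
#     my-task:
#       spy_headers: yes
#       rss: http://example.com/feed.xml
#
# While the task runs, every request made through task.requests (and, for legacy plugins
# still using urllib2, through the installed opener) has its headers written to the log.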
|
mit
|
avanzosc/avanzosc6.1
|
avanzosc_stock_invoice_pesamod/__init__.py
|
1
|
1095
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2012 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import stock_location
import invoice_mod
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jdemaeyer/scrapy
|
tests/test_crawl.py
|
14
|
10928
|
import json
import socket
import logging
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.http import Request
from scrapy.crawler import CrawlerRunner
from scrapy.utils.python import to_unicode
from tests import mock
from tests.spiders import FollowAllSpider, DelaySpider, SimpleSpider, \
BrokenStartRequestsSpider, SingleRequestSpider, DuplicateStartRequestsSpider
from tests.mockserver import MockServer
class CrawlTestCase(TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
self.runner = CrawlerRunner()
def tearDown(self):
self.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def test_follow_all(self):
crawler = self.runner.create_crawler(FollowAllSpider)
yield crawler.crawl()
self.assertEqual(len(crawler.spider.urls_visited), 11) # 10 + start_url
@defer.inlineCallbacks
def test_delay(self):
# short to long delays
yield self._test_delay(0.2, False)
yield self._test_delay(1, False)
# randoms
yield self._test_delay(0.2, True)
yield self._test_delay(1, True)
@defer.inlineCallbacks
def _test_delay(self, delay, randomize):
settings = {"DOWNLOAD_DELAY": delay, 'RANDOMIZE_DOWNLOAD_DELAY': randomize}
crawler = CrawlerRunner(settings).create_crawler(FollowAllSpider)
yield crawler.crawl(maxlatency=delay * 2)
t = crawler.spider.times
totaltime = t[-1] - t[0]
avgd = totaltime / (len(t) - 1)
tolerance = 0.6 if randomize else 0.2
self.assertTrue(avgd > delay * (1 - tolerance),
"download delay too small: %s" % avgd)
@defer.inlineCallbacks
def test_timeout_success(self):
crawler = self.runner.create_crawler(DelaySpider)
yield crawler.crawl(n=0.5)
self.assertTrue(crawler.spider.t1 > 0)
self.assertTrue(crawler.spider.t2 > 0)
self.assertTrue(crawler.spider.t2 > crawler.spider.t1)
@defer.inlineCallbacks
def test_timeout_failure(self):
crawler = CrawlerRunner({"DOWNLOAD_TIMEOUT": 0.35}).create_crawler(DelaySpider)
yield crawler.crawl(n=0.5)
self.assertTrue(crawler.spider.t1 > 0)
self.assertTrue(crawler.spider.t2 == 0)
self.assertTrue(crawler.spider.t2_err > 0)
self.assertTrue(crawler.spider.t2_err > crawler.spider.t1)
# server hangs after receiving response headers
yield crawler.crawl(n=0.5, b=1)
self.assertTrue(crawler.spider.t1 > 0)
self.assertTrue(crawler.spider.t2 == 0)
self.assertTrue(crawler.spider.t2_err > 0)
self.assertTrue(crawler.spider.t2_err > crawler.spider.t1)
@defer.inlineCallbacks
def test_retry_503(self):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/status?n=503")
self._assert_retried(l)
@defer.inlineCallbacks
def test_retry_conn_failed(self):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:65432/status?n=503")
self._assert_retried(l)
@defer.inlineCallbacks
def test_retry_dns_error(self):
with mock.patch('socket.gethostbyname',
side_effect=socket.gaierror(-5, 'No address associated with hostname')):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://example.com/")
self._assert_retried(l)
@defer.inlineCallbacks
def test_start_requests_bug_before_yield(self):
with LogCapture('scrapy', level=logging.ERROR) as l:
crawler = self.runner.create_crawler(BrokenStartRequestsSpider)
yield crawler.crawl(fail_before_yield=1)
self.assertEqual(len(l.records), 1)
record = l.records[0]
self.assertIsNotNone(record.exc_info)
self.assertIs(record.exc_info[0], ZeroDivisionError)
@defer.inlineCallbacks
def test_start_requests_bug_yielding(self):
with LogCapture('scrapy', level=logging.ERROR) as l:
crawler = self.runner.create_crawler(BrokenStartRequestsSpider)
yield crawler.crawl(fail_yielding=1)
self.assertEqual(len(l.records), 1)
record = l.records[0]
self.assertIsNotNone(record.exc_info)
self.assertIs(record.exc_info[0], ZeroDivisionError)
@defer.inlineCallbacks
def test_start_requests_lazyness(self):
settings = {"CONCURRENT_REQUESTS": 1}
crawler = CrawlerRunner(settings).create_crawler(BrokenStartRequestsSpider)
yield crawler.crawl()
#self.assertTrue(False, crawler.spider.seedsseen)
#self.assertTrue(crawler.spider.seedsseen.index(None) < crawler.spider.seedsseen.index(99),
# crawler.spider.seedsseen)
@defer.inlineCallbacks
def test_start_requests_dupes(self):
settings = {"CONCURRENT_REQUESTS": 1}
crawler = CrawlerRunner(settings).create_crawler(DuplicateStartRequestsSpider)
yield crawler.crawl(dont_filter=True, distinct_urls=2, dupe_factor=3)
self.assertEqual(crawler.spider.visited, 6)
yield crawler.crawl(dont_filter=False, distinct_urls=3, dupe_factor=4)
self.assertEqual(crawler.spider.visited, 3)
@defer.inlineCallbacks
def test_unbounded_response(self):
# Completeness of responses without Content-Length or Transfer-Encoding
        # cannot be determined; we treat them as valid but flag them as "partial"
from six.moves.urllib.parse import urlencode
query = urlencode({'raw': '''\
HTTP/1.1 200 OK
Server: Apache-Coyote/1.1
X-Powered-By: Servlet 2.4; JBoss-4.2.3.GA (build: SVNTag=JBoss_4_2_3_GA date=200807181417)/JBossWeb-2.0
Set-Cookie: JSESSIONID=08515F572832D0E659FD2B0D8031D75F; Path=/
Pragma: no-cache
Expires: Thu, 01 Jan 1970 00:00:00 GMT
Cache-Control: no-cache
Cache-Control: no-store
Content-Type: text/html;charset=UTF-8
Content-Language: en
Date: Tue, 27 Aug 2013 13:05:05 GMT
Connection: close
foo body
with multiples lines
'''})
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/raw?{0}".format(query))
self.assertEqual(str(l).count("Got response 200"), 1)
@defer.inlineCallbacks
def test_retry_conn_lost(self):
# connection lost after receiving data
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/drop?abort=0")
self._assert_retried(l)
@defer.inlineCallbacks
def test_retry_conn_aborted(self):
# connection lost before receiving data
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/drop?abort=1")
self._assert_retried(l)
def _assert_retried(self, log):
self.assertEqual(str(log).count("Retrying"), 2)
self.assertEqual(str(log).count("Gave up retrying"), 1)
@defer.inlineCallbacks
def test_referer_header(self):
"""Referer header is set by RefererMiddleware unless it is already set"""
req0 = Request('http://localhost:8998/echo?headers=1&body=0', dont_filter=1)
req1 = req0.replace()
req2 = req0.replace(headers={'Referer': None})
req3 = req0.replace(headers={'Referer': 'http://example.com'})
req0.meta['next'] = req1
req1.meta['next'] = req2
req2.meta['next'] = req3
crawler = self.runner.create_crawler(SingleRequestSpider)
yield crawler.crawl(seed=req0)
# basic asserts in case of weird communication errors
self.assertIn('responses', crawler.spider.meta)
self.assertNotIn('failures', crawler.spider.meta)
# start requests doesn't set Referer header
echo0 = json.loads(to_unicode(crawler.spider.meta['responses'][2].body))
self.assertNotIn('Referer', echo0['headers'])
# following request sets Referer to start request url
echo1 = json.loads(to_unicode(crawler.spider.meta['responses'][1].body))
self.assertEqual(echo1['headers'].get('Referer'), [req0.url])
# next request avoids Referer header
echo2 = json.loads(to_unicode(crawler.spider.meta['responses'][2].body))
self.assertNotIn('Referer', echo2['headers'])
# last request explicitly sets a Referer header
echo3 = json.loads(to_unicode(crawler.spider.meta['responses'][3].body))
self.assertEqual(echo3['headers'].get('Referer'), ['http://example.com'])
@defer.inlineCallbacks
def test_engine_status(self):
from scrapy.utils.engine import get_engine_status
est = []
def cb(response):
est.append(get_engine_status(crawler.engine))
crawler = self.runner.create_crawler(SingleRequestSpider)
yield crawler.crawl(seed='http://localhost:8998/', callback_func=cb)
self.assertEqual(len(est), 1, est)
s = dict(est[0])
self.assertEqual(s['engine.spider.name'], crawler.spider.name)
self.assertEqual(s['len(engine.scraper.slot.active)'], 1)
@defer.inlineCallbacks
def test_graceful_crawl_error_handling(self):
"""
Test whether errors happening anywhere in Crawler.crawl() are properly
reported (and not somehow swallowed) after a graceful engine shutdown.
The errors should not come from within Scrapy's core but from within
spiders/middlewares/etc., e.g. raised in Spider.start_requests(),
SpiderMiddleware.process_start_requests(), etc.
"""
class TestError(Exception):
pass
class FaultySpider(SimpleSpider):
def start_requests(self):
raise TestError
crawler = self.runner.create_crawler(FaultySpider)
yield self.assertFailure(crawler.crawl(), TestError)
self.assertFalse(crawler.crawling)
@defer.inlineCallbacks
def test_crawlerrunner_accepts_crawler(self):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as log:
yield self.runner.crawl(crawler, "http://localhost:8998/status?n=200")
self.assertIn("Got response 200", str(log))
@defer.inlineCallbacks
def test_crawl_multiple(self):
self.runner.crawl(SimpleSpider, "http://localhost:8998/status?n=200")
self.runner.crawl(SimpleSpider, "http://localhost:8998/status?n=503")
with LogCapture() as log:
yield self.runner.join()
self._assert_retried(log)
self.assertIn("Got response 200", str(log))
|
bsd-3-clause
|
nilsgrabbert/spark
|
examples/src/main/python/mllib/naive_bayes_example.py
|
106
|
2285
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
NaiveBayes Example.
Usage:
`spark-submit --master local[4] examples/src/main/python/mllib/naive_bayes_example.py`
"""
from __future__ import print_function
import shutil
from pyspark import SparkContext
# $example on$
from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonNaiveBayesExample")
# $example on$
# Load and parse the data file.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
# Split data approximately into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4])
# Train a naive Bayes model.
model = NaiveBayes.train(training, 1.0)
# Make prediction and test accuracy.
predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
print('model accuracy {}'.format(accuracy))
# Save and load model
output_dir = 'target/tmp/myNaiveBayesModel'
shutil.rmtree(output_dir, ignore_errors=True)
model.save(sc, output_dir)
sameModel = NaiveBayesModel.load(sc, output_dir)
predictionAndLabel = test.map(lambda p: (sameModel.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
print('sameModel accuracy {}'.format(accuracy))
# $example off$
|
apache-2.0
|
tdfischer/protobuf
|
python/google/protobuf/internal/decoder.py
|
223
|
26136
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for decoding protocol buffer primitives.
This code is very similar to encoder.py -- read the docs for that module first.
A "decoder" is a function with the signature:
Decode(buffer, pos, end, message, field_dict)
The arguments are:
buffer: The string containing the encoded message.
pos: The current position in the string.
end: The position in the string where the current message ends. May be
less than len(buffer) if we're reading a sub-message.
message: The message object into which we're parsing.
field_dict: message._fields (avoids a hashtable lookup).
The decoder reads the field and stores it into field_dict, returning the new
buffer position. A decoder for a repeated field may proactively decode all of
the elements of that field, if they appear consecutively.
Note that decoders may throw any of the following:
IndexError: Indicates a truncated message.
struct.error: Unpacking of a fixed-width field failed.
message.DecodeError: Other errors.
Decoders are expected to raise an exception if they are called with pos > end.
This allows callers to be lax about bounds checking: it's fine to read past
"end" as long as you are sure that someone else will notice and throw an
exception later on.
Something up the call stack is expected to catch IndexError and struct.error
and convert them to message.DecodeError.
Decoders are constructed using decoder constructors with the signature:
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
The arguments are:
field_number: The field number of the field we want to decode.
is_repeated: Is the field a repeated field? (bool)
is_packed: Is the field a packed field? (bool)
key: The key to use when looking up the field within field_dict.
(This is actually the FieldDescriptor but nothing in this
file should depend on that.)
new_default: A function which takes a message object as a parameter and
returns a new instance of the default value for this field.
(This is called for repeated fields and sub-messages, when an
instance does not already exist.)
As with encoders, we define a decoder constructor for every type of field.
Then, for every field of every message class we construct an actual decoder.
That decoder goes into a dict indexed by tag, so when we decode a message
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
"""
__author__ = '[email protected] (Kenton Varda)'
import struct
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
_NAN = _POS_INF * 0
# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
def _VarintDecoder(mask):
"""Return an encoder for a basic varint value (does not include tag).
Decoded values will be bitwise-anded with the given mask before being
returned, e.g. to limit them to 32 bits. The returned decoder does not
take the usual "end" parameter -- the caller is expected to do bounds checking
after the fact (often the caller can defer such checking until later). The
decoder returns a (value, new_pos) pair.
"""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
def _SignedVarintDecoder(mask):
"""Like _VarintDecoder() but decodes signed values."""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
if result > 0x7fffffffffffffff:
result -= (1 << 64)
result |= ~mask
else:
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
_DecodeVarint = _VarintDecoder((1 << 64) - 1)
_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1)
# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1)
_DecodeSignedVarint32 = _SignedVarintDecoder((1 << 32) - 1)
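# Worked example (illustration only, not part of the original module): the varint
# encoding of 300 is the two bytes 0xAC 0x02.  The low seven bits of each byte carry
# payload, least-significant group first: (0xAC & 0x7f) + (0x02 << 7) == 44 + 256 == 300.
def _ExampleDecodeVarint300():
  """Returns (300, 2): the decoded value and the new buffer position."""
  return _DecodeVarint('\xac\x02', 0)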
def ReadTag(buffer, pos):
"""Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python.
"""
start = pos
while ord(buffer[pos]) & 0x80:
pos += 1
pos += 1
return (buffer[start:pos], pos)
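# Tag worked example (illustration only): a tag is the varint of
# (field_number << 3) | wire_type, so field 1 with wire type 0 (varint) is the single
# byte 0x08 and field 2 with wire type 2 (length-delimited) is 0x12.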
# --------------------------------------------------------------------
def _SimpleDecoder(wire_type, decode_value):
"""Return a constructor for a decoder for fields of a particular type.
Args:
wire_type: The field's wire type.
decode_value: A function which decodes an individual value, e.g.
_DecodeVarint()
"""
def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
if is_packed:
local_DecodeVarint = _DecodeVarint
def DecodePackedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
(endpoint, pos) = local_DecodeVarint(buffer, pos)
endpoint += pos
if endpoint > end:
raise _DecodeError('Truncated message.')
while pos < endpoint:
(element, pos) = decode_value(buffer, pos)
value.append(element)
if pos > endpoint:
del value[-1] # Discard corrupt value.
raise _DecodeError('Packed element was truncated.')
return pos
return DecodePackedField
elif is_repeated:
tag_bytes = encoder.TagBytes(field_number, wire_type)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(element, new_pos) = decode_value(buffer, pos)
value.append(element)
# Predict that the next tag is another copy of the same repeated
# field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
# Prediction failed. Return.
if new_pos > end:
raise _DecodeError('Truncated message.')
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(field_dict[key], pos) = decode_value(buffer, pos)
if pos > end:
del field_dict[key] # Discard corrupt value.
raise _DecodeError('Truncated message.')
return pos
return DecodeField
return SpecificDecoder
def _ModifiedDecoder(wire_type, decode_value, modify_value):
"""Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode.
"""
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
def InnerDecode(buffer, pos):
(result, new_pos) = decode_value(buffer, pos)
return (modify_value(result), new_pos)
return _SimpleDecoder(wire_type, InnerDecode)
def _StructPackDecoder(wire_type, format):
"""Return a constructor for a decoder for a fixed-width field.
Args:
wire_type: The field's wire type.
format: The format string to pass to struct.unpack().
"""
value_size = struct.calcsize(format)
local_unpack = struct.unpack
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
def InnerDecode(buffer, pos):
new_pos = pos + value_size
result = local_unpack(format, buffer[pos:new_pos])[0]
return (result, new_pos)
return _SimpleDecoder(wire_type, InnerDecode)
def _FloatDecoder():
"""Returns a decoder for a float field.
This code works around a bug in struct.unpack for non-finite 32-bit
floating-point values.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
new_pos = pos + 4
float_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set, then it's non-finite.
# In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
# To avoid that, we parse it specially.
if ((float_bytes[3] in '\x7F\xFF')
and (float_bytes[2] >= '\x80')):
# If at least one significand bit is set...
if float_bytes[0:3] != '\x00\x00\x80':
return (_NAN, new_pos)
# If sign bit is set...
if float_bytes[3] == '\xFF':
return (_NEG_INF, new_pos)
return (_POS_INF, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<f', float_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
def _DoubleDecoder():
"""Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
new_pos = pos + 8
double_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set and at least one significand
# bit set, it's not a number. In Python 2.4, struct.unpack will treat it
# as inf or -inf. To avoid that, we treat it specially.
if ((double_bytes[7] in '\x7F\xFF')
and (double_bytes[6] >= '\xF0')
and (double_bytes[0:7] != '\x00\x00\x00\x00\x00\x00\xF0')):
return (_NAN, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<d', double_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
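# Illustrative example (not part of the original module), assuming Python 2 str
# buffers: struct.pack('<d', float('inf')) is '\x00\x00\x00\x00\x00\x00\xf0\x7f',
# which the check above deliberately lets fall through to struct.unpack, whereas
# a NaN pattern such as '\x00\x00\x00\x00\x00\x00\xf8\x7f' short-circuits to _NAN.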
# --------------------------------------------------------------------
Int32Decoder = EnumDecoder = _SimpleDecoder(
wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)
Int64Decoder = _SimpleDecoder(
wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)
UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)
SInt32Decoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()
BoolDecoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
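# Illustrative note (not part of the original module): each name above is a
# decoder *constructor*; it is called once per field and the returned function
# is then invoked while parsing.  A rough sketch, where `field_descriptor`
# stands in for the real key object (an assumption here):
#
#   decode = UInt64Decoder(field_number=1, is_repeated=False, is_packed=False,
#                          key=field_descriptor, new_default=None)
#   pos = decode(buffer, pos, end, message, field_dict)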
def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a string field."""
local_DecodeVarint = _DecodeVarint
local_unicode = unicode
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(local_unicode(buffer[pos:new_pos], 'utf-8'))
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = local_unicode(buffer[pos:new_pos], 'utf-8')
return new_pos
return DecodeField
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a bytes field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(buffer[pos:new_pos])
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = buffer[pos:new_pos]
return new_pos
return DecodeField
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a group field."""
end_tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_END_GROUP)
end_tag_len = len(end_tag_bytes)
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_START_GROUP)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value.add()._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
return new_pos
return DecodeField
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a message field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
if value._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it encountered
# an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
return new_pos
return DecodeField
# --------------------------------------------------------------------
MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)
def MessageSetItemDecoder(extensions_by_number):
"""Returns a decoder for a MessageSet item.
The parameter is the _extensions_by_number map for the message class.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_ReadTag = ReadTag
local_DecodeVarint = _DecodeVarint
local_SkipField = SkipField
def DecodeItem(buffer, pos, end, message, field_dict):
message_set_item_start = pos
type_id = -1
message_start = -1
message_end = -1
# Technically, type_id and message can appear in any order, so we need
# a little loop here.
while 1:
(tag_bytes, pos) = local_ReadTag(buffer, pos)
if tag_bytes == type_id_tag_bytes:
(type_id, pos) = local_DecodeVarint(buffer, pos)
elif tag_bytes == message_tag_bytes:
(size, message_start) = local_DecodeVarint(buffer, pos)
pos = message_end = message_start + size
elif tag_bytes == item_end_tag_bytes:
break
else:
pos = SkipField(buffer, pos, end, tag_bytes)
if pos == -1:
raise _DecodeError('Missing group end tag.')
if pos > end:
raise _DecodeError('Truncated message.')
if type_id == -1:
raise _DecodeError('MessageSet item missing type_id.')
if message_start == -1:
raise _DecodeError('MessageSet item missing message.')
extension = extensions_by_number.get(type_id)
if extension is not None:
value = field_dict.get(extension)
if value is None:
value = field_dict.setdefault(
extension, extension.message_type._concrete_class())
      if value._InternalParse(buffer, message_start, message_end) != message_end:
# The only reason _InternalParse would return early is if it encountered
# an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
else:
if not message._unknown_fields:
message._unknown_fields = []
message._unknown_fields.append((MESSAGE_SET_ITEM_TAG,
buffer[message_set_item_start:pos]))
return pos
return DecodeItem
# --------------------------------------------------------------------
# Optimization is not as heavy here because calls to SkipField() are rare,
# except for handling end-group tags.
def _SkipVarint(buffer, pos, end):
"""Skip a varint value. Returns the new position."""
while ord(buffer[pos]) & 0x80:
pos += 1
pos += 1
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipFixed64(buffer, pos, end):
"""Skip a fixed64 value. Returns the new position."""
pos += 8
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipLengthDelimited(buffer, pos, end):
"""Skip a length-delimited value. Returns the new position."""
(size, pos) = _DecodeVarint(buffer, pos)
pos += size
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipGroup(buffer, pos, end):
"""Skip sub-group. Returns the new position."""
while 1:
(tag_bytes, pos) = ReadTag(buffer, pos)
new_pos = SkipField(buffer, pos, end, tag_bytes)
if new_pos == -1:
return pos
pos = new_pos
def _EndGroup(buffer, pos, end):
"""Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
return -1
def _SkipFixed32(buffer, pos, end):
"""Skip a fixed32 value. Returns the new position."""
pos += 4
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _RaiseInvalidWireType(buffer, pos, end):
"""Skip function for unknown wire types. Raises an exception."""
raise _DecodeError('Tag had invalid wire type.')
def _FieldSkipper():
"""Constructs the SkipField function."""
WIRETYPE_TO_SKIPPER = [
_SkipVarint,
_SkipFixed64,
_SkipLengthDelimited,
_SkipGroup,
_EndGroup,
_SkipFixed32,
_RaiseInvalidWireType,
_RaiseInvalidWireType,
]
wiretype_mask = wire_format.TAG_TYPE_MASK
local_ord = ord
def SkipField(buffer, pos, end, tag_bytes):
"""Skips a field with the specified tag.
|pos| should point to the byte immediately after the tag.
Returns:
The new position (after the tag value), or -1 if the tag is an end-group
tag (in which case the calling loop should break).
"""
# The wire type is always in the first byte since varints are little-endian.
wire_type = local_ord(tag_bytes[0]) & wiretype_mask
return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)
return SkipField
SkipField = _FieldSkipper()
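# Illustrative example (not part of the original module): for a field-1 varint
# the tag byte is chr((1 << 3) | 0) == '\x08', so
# ord('\x08') & wire_format.TAG_TYPE_MASK == 0 selects _SkipVarint from the
# table above, while wire types 6 and 7 dispatch to _RaiseInvalidWireType.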
|
bsd-3-clause
|
jfhumann/servo
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/filters/inject_meta_charset.py
|
1730
|
2746
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def __init__(self, source, encoding):
_base.Filter.__init__(self, source)
self.encoding = encoding
def __iter__(self):
state = "pre_head"
meta_found = (self.encoding is None)
pending = []
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag":
if token["name"].lower() == "head":
state = "in_head"
elif type == "EmptyTag":
if token["name"].lower() == "meta":
# replace charset with actual encoding
has_http_equiv_content_type = False
for (namespace, name), value in token["data"].items():
if namespace is not None:
continue
elif name.lower() == 'charset':
token["data"][(namespace, name)] = self.encoding
meta_found = True
break
elif name == 'http-equiv' and value.lower() == 'content-type':
has_http_equiv_content_type = True
else:
if has_http_equiv_content_type and (None, "content") in token["data"]:
token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding
meta_found = True
elif token["name"].lower() == "head" and not meta_found:
# insert meta into empty head
yield {"type": "StartTag", "name": "head",
"data": token["data"]}
yield {"type": "EmptyTag", "name": "meta",
"data": {(None, "charset"): self.encoding}}
yield {"type": "EndTag", "name": "head"}
meta_found = True
continue
elif type == "EndTag":
if token["name"].lower() == "head" and pending:
# insert meta into head (if necessary) and flush pending queue
yield pending.pop(0)
if not meta_found:
yield {"type": "EmptyTag", "name": "meta",
"data": {(None, "charset"): self.encoding}}
while pending:
yield pending.pop(0)
meta_found = True
state = "post_head"
if state == "in_head":
pending.append(token)
else:
yield token
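# Illustrative usage sketch (not part of the original file): the filter wraps any
# iterable of token dicts, so a minimal run over a hypothetical hand-built stream
# might look like:
#
#   tokens = [{"type": "StartTag", "name": "head", "data": {}},
#             {"type": "EndTag", "name": "head"}]
#   list(Filter(tokens, "utf-8"))
#
# which yields the two original tokens with an injected
#   {"type": "EmptyTag", "name": "meta", "data": {(None, "charset"): "utf-8"}}
# between them.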
|
mpl-2.0
|
UBERMALLOW/external_skia
|
tools/jsondiff.py
|
67
|
7458
|
#!/usr/bin/python
'''
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
'''
Gathers diffs between 2 JSON expectations files, or between actual and
expected results within a single JSON actual-results file,
and generates an old-vs-new diff dictionary.
TODO(epoger): Fix indentation in this file (2-space indents, not 4-space).
'''
# System-level imports
import argparse
import json
import os
import sys
import urllib2
# Imports from within Skia
#
# We need to add the 'gm' directory, so that we can import gm_json.py within
# that directory. That script allows us to parse the actual-results.json file
# written out by the GM tool.
# Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end*
# so any dirs that are already in the PYTHONPATH will be preferred.
#
# This assumes that the 'gm' directory has been checked out as a sibling of
# the 'tools' directory containing this script, which will be the case if
# 'trunk' was checked out as a single unit.
GM_DIRECTORY = os.path.realpath(
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm'))
if GM_DIRECTORY not in sys.path:
sys.path.append(GM_DIRECTORY)
import gm_json
# Object that generates diffs between two JSON gm result files.
class GMDiffer(object):
def __init__(self):
pass
def _GetFileContentsAsString(self, filepath):
"""Returns the full contents of a file, as a single string.
If the filename looks like a URL, download its contents.
If the filename is None, return None."""
if filepath is None:
return None
elif filepath.startswith('http:') or filepath.startswith('https:'):
return urllib2.urlopen(filepath).read()
else:
return open(filepath, 'r').read()
def _GetExpectedResults(self, contents):
"""Returns the dictionary of expected results from a JSON string,
in this form:
{
'test1' : 14760033689012826769,
'test2' : 9151974350149210736,
...
}
We make these simplifying assumptions:
1. Each test has either 0 or 1 allowed results.
2. All expectations are of type JSONKEY_HASHTYPE_BITMAP_64BITMD5.
Any tests which violate those assumptions will cause an exception to
be raised.
Any tests for which we have no expectations will be left out of the
returned dictionary.
"""
result_dict = {}
json_dict = gm_json.LoadFromString(contents)
all_expectations = json_dict[gm_json.JSONKEY_EXPECTEDRESULTS]
# Prevent https://code.google.com/p/skia/issues/detail?id=1588
if not all_expectations:
return result_dict
for test_name in all_expectations.keys():
test_expectations = all_expectations[test_name]
allowed_digests = test_expectations[
gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS]
if allowed_digests:
num_allowed_digests = len(allowed_digests)
if num_allowed_digests > 1:
raise ValueError(
'test %s has %d allowed digests' % (
test_name, num_allowed_digests))
digest_pair = allowed_digests[0]
if digest_pair[0] != gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5:
raise ValueError(
'test %s has unsupported hashtype %s' % (
test_name, digest_pair[0]))
result_dict[test_name] = digest_pair[1]
return result_dict
def _GetActualResults(self, contents):
"""Returns the dictionary of actual results from a JSON string,
in this form:
{
'test1' : 14760033689012826769,
'test2' : 9151974350149210736,
...
}
We make these simplifying assumptions:
1. All results are of type JSONKEY_HASHTYPE_BITMAP_64BITMD5.
Any tests which violate those assumptions will cause an exception to
be raised.
Any tests for which we have no actual results will be left out of the
returned dictionary.
"""
result_dict = {}
json_dict = gm_json.LoadFromString(contents)
all_result_types = json_dict[gm_json.JSONKEY_ACTUALRESULTS]
for result_type in all_result_types.keys():
results_of_this_type = all_result_types[result_type]
if results_of_this_type:
for test_name in results_of_this_type.keys():
digest_pair = results_of_this_type[test_name]
if digest_pair[0] != gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5:
raise ValueError(
'test %s has unsupported hashtype %s' % (
test_name, digest_pair[0]))
result_dict[test_name] = digest_pair[1]
return result_dict
def _DictionaryDiff(self, old_dict, new_dict):
"""Generate a dictionary showing the diffs between old_dict and new_dict.
Any entries which are identical across them will be left out."""
diff_dict = {}
all_keys = set(old_dict.keys() + new_dict.keys())
for key in all_keys:
if old_dict.get(key) != new_dict.get(key):
new_entry = {}
new_entry['old'] = old_dict.get(key)
new_entry['new'] = new_dict.get(key)
diff_dict[key] = new_entry
return diff_dict
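    # Illustrative example (not in the original file): given
    #   old_dict = {'a': 1, 'b': 2} and new_dict = {'a': 1, 'b': 3, 'c': 4}
    # the method above returns
    #   {'b': {'old': 2, 'new': 3}, 'c': {'old': None, 'new': 4}}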
def GenerateDiffDict(self, oldfile, newfile=None):
"""Generate a dictionary showing the diffs:
old = expectations within oldfile
new = expectations within newfile
If newfile is not specified, then 'new' is the actual results within
oldfile.
"""
return self.GenerateDiffDictFromStrings(self._GetFileContentsAsString(oldfile),
self._GetFileContentsAsString(newfile))
def GenerateDiffDictFromStrings(self, oldjson, newjson=None):
"""Generate a dictionary showing the diffs:
old = expectations within oldjson
new = expectations within newjson
        If newjson is not specified, then 'new' is the actual results within
        oldjson.
"""
old_results = self._GetExpectedResults(oldjson)
if newjson:
new_results = self._GetExpectedResults(newjson)
else:
new_results = self._GetActualResults(oldjson)
return self._DictionaryDiff(old_results, new_results)
def _Main():
parser = argparse.ArgumentParser()
parser.add_argument(
'old',
help='Path to JSON file whose expectations to display on ' +
'the "old" side of the diff. This can be a filepath on ' +
'local storage, or a URL.')
parser.add_argument(
'new', nargs='?',
help='Path to JSON file whose expectations to display on ' +
'the "new" side of the diff; if not specified, uses the ' +
'ACTUAL results from the "old" JSON file. This can be a ' +
'filepath on local storage, or a URL.')
args = parser.parse_args()
differ = GMDiffer()
diffs = differ.GenerateDiffDict(oldfile=args.old, newfile=args.new)
json.dump(diffs, sys.stdout, sort_keys=True, indent=2)
if __name__ == '__main__':
_Main()
|
bsd-3-clause
|
flyballlabs/threatdetectionservice
|
api/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.py
|
450
|
9668
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll, winapi_test
winterm = None
if windll is not None:
winterm = WinTerm()
def is_stream_closed(stream):
return not hasattr(stream, 'closed') or stream.closed
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
self.__convertor.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_CSI_RE = re.compile('\001?\033\[((?:\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\001?\033\]((?:.|;)*?)(\x07)\002?') # Operating System Command
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = os.name == 'nt'
# We test if the WinAPI works, because even if we are on Windows
# we may be using a terminal that doesn't support the WinAPI
# (e.g. Cygwin Terminal). In this case it's up to the terminal
# to support the ANSI codes.
conversion_supported = on_windows and winapi_test()
# should we strip ANSI sequences from our output?
if strip is None:
strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))
self.strip = strip
        # should we convert ANSI sequences into win32 calls?
if convert is None:
convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
}
return dict()
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.strip and not is_stream_closed(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
text = self.convert_osc(text)
for match in self.ANSI_CSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(command, paramstring)
self.call_win32(command, params)
def extract_params(self, command, paramstring):
if command in 'Hf':
params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
while len(params) < 2:
# defaults:
params = params + (1,)
else:
params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
if len(params) == 0:
# defaults:
if command in 'JKm':
params = (0,)
elif command in 'ABCD':
params = (1,)
return params
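    # Illustrative examples (not part of the original file):
    #   self.extract_params('m', '31;1') -> (31, 1)
    #   self.extract_params('H', '')     -> (1, 1)   # cursor-position defaults
    #   self.extract_params('K', '')     -> (0,)     # erase-line default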
def call_win32(self, command, params):
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in 'J':
winterm.erase_screen(params[0], on_stderr=self.on_stderr)
elif command in 'K':
winterm.erase_line(params[0], on_stderr=self.on_stderr)
elif command in 'Hf': # cursor position - absolute
winterm.set_cursor_position(params, on_stderr=self.on_stderr)
elif command in 'ABCD': # cursor position - relative
n = params[0]
# A - up, B - down, C - forward, D - back
x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
def convert_osc(self, text):
for match in self.ANSI_OSC_RE.finditer(text):
start, end = match.span()
text = text[:start] + text[end:]
paramstring, command = match.groups()
if command in '\x07': # \x07 = BEL
params = paramstring.split(";")
# 0 - change title and icon (we will only change title)
# 1 - change icon (we don't support this)
# 2 - change title
if params[0] in '02':
winterm.set_title(params[1])
return text
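# Illustrative usage sketch (not part of the original file): colorama's init()
# wires this class up roughly as follows, after which ANSI escapes written to
# stdout are stripped and/or translated into win32 calls:
#
#   wrapper = AnsiToWin32(sys.stdout)
#   if wrapper.should_wrap():
#       sys.stdout = wrapper.stream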
|
apache-2.0
|
RNAcentral/rnacentral-webcode
|
rnacentral/portal/models/ensembl_karyotype.py
|
1
|
1093
|
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from caching.base import CachingMixin, CachingManager
from django.contrib.postgres.fields import JSONField
from django.db import models
from portal.models import EnsemblAssembly
class EnsemblKaryotype(CachingMixin, models.Model):
assembly = models.ForeignKey(EnsemblAssembly, related_name='karyotype', db_column='assembly_id', to_field='assembly_id', on_delete=models.CASCADE)
karyotype = JSONField()
objects = CachingManager()
class Meta:
db_table = 'ensembl_karyotype'
|
apache-2.0
|
staslev/incubator-beam
|
sdks/python/apache_beam/runners/dataflow/internal/clients/dataflow/message_matchers.py
|
9
|
4292
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from hamcrest.core.base_matcher import BaseMatcher
IGNORED = object()
class MetricStructuredNameMatcher(BaseMatcher):
"""Matches a MetricStructuredName."""
def __init__(self,
name=IGNORED,
origin=IGNORED,
context=IGNORED):
"""Creates a MetricsStructuredNameMatcher.
Any property not passed in to the constructor will be ignored when matching.
Args:
name: A string with the metric name.
origin: A string with the metric namespace.
context: A key:value dictionary that will be matched to the
structured name.
"""
if context != IGNORED and not isinstance(context, dict):
raise ValueError('context must be a Python dictionary.')
self.name = name
self.origin = origin
self.context = context
def _matches(self, item):
if self.name != IGNORED and item.name != self.name:
return False
if self.origin != IGNORED and item.origin != self.origin:
return False
if self.context != IGNORED:
for key, name in self.context.iteritems():
if key not in item.context:
return False
if name != IGNORED and item.context[key] != name:
return False
return True
def describe_to(self, description):
descriptors = []
if self.name != IGNORED:
descriptors.append('name is {}'.format(self.name))
if self.origin != IGNORED:
descriptors.append('origin is {}'.format(self.origin))
if self.context != IGNORED:
descriptors.append('context is ({})'.format(str(self.context)))
item_description = ' and '.join(descriptors)
description.append(item_description)
class MetricUpdateMatcher(BaseMatcher):
"""Matches a metrics update protocol buffer."""
def __init__(self,
cumulative=IGNORED,
name=IGNORED,
scalar=IGNORED,
kind=IGNORED):
"""Creates a MetricUpdateMatcher.
Any property not passed in to the constructor will be ignored when matching.
Args:
cumulative: A boolean.
name: A MetricStructuredNameMatcher object that matches the name.
scalar: An integer with the metric update.
kind: A string defining the kind of counter.
"""
if name != IGNORED and not isinstance(name, MetricStructuredNameMatcher):
raise ValueError('name must be a MetricStructuredNameMatcher.')
self.cumulative = cumulative
self.name = name
self.scalar = scalar
self.kind = kind
def _matches(self, item):
if self.cumulative != IGNORED and item.cumulative != self.cumulative:
return False
if self.name != IGNORED and not self.name._matches(item.name):
return False
if self.kind != IGNORED and item.kind != self.kind:
return False
if self.scalar != IGNORED:
value_property = [p
for p in item.scalar.object_value.properties
if p.key == 'value']
int_value = value_property[0].value.integer_value
if self.scalar != int_value:
return False
return True
def describe_to(self, description):
descriptors = []
if self.cumulative != IGNORED:
descriptors.append('cumulative is {}'.format(self.cumulative))
if self.name != IGNORED:
descriptors.append('name is {}'.format(self.name))
if self.scalar != IGNORED:
descriptors.append('scalar is ({})'.format(str(self.scalar)))
item_description = ' and '.join(descriptors)
description.append(item_description)
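# Illustrative usage sketch (not part of the original file), assuming PyHamcrest's
# assert_that is available; the metric name and kind below are hypothetical:
#
#   import hamcrest as hc
#   hc.assert_that(update,
#                  MetricUpdateMatcher(
#                      name=MetricStructuredNameMatcher(name='ElementCount'),
#                      kind='Sum'))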
|
apache-2.0
|
fevxie/odoo
|
addons/crm_helpdesk/__init__.py
|
442
|
1081
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_helpdesk
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
keyurpatel076/MissionPlannerGit
|
packages/IronPython.StdLib.2.7.5-beta1/content/Lib/fractions.py
|
252
|
22390
|
# Originally contributed by Sjoerd Mullender.
# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
"""Rational, infinite-precision, real numbers."""
from __future__ import division
from decimal import Decimal
import math
import numbers
import operator
import re
__all__ = ['Fraction', 'gcd']
Rational = numbers.Rational
def gcd(a, b):
"""Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
"""
while b:
a, b = b, a%b
return a
_RATIONAL_FORMAT = re.compile(r"""
\A\s* # optional whitespace at the start, then
(?P<sign>[-+]?) # an optional sign, then
(?=\d|\.\d) # lookahead for digit or .digit
(?P<num>\d*) # numerator (possibly empty)
(?: # followed by
(?:/(?P<denom>\d+))? # an optional denominator
| # or
(?:\.(?P<decimal>\d*))? # an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and optional exponent
)
\s*\Z # and optional whitespace to finish
""", re.VERBOSE | re.IGNORECASE)
class Fraction(Rational):
"""This class implements rational numbers.
In the two-argument form of the constructor, Fraction(8, 6) will
produce a rational number equivalent to 4/3. Both arguments must
be Rational. The numerator defaults to 0 and the denominator
defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.
Fractions can also be constructed from:
- numeric strings similar to those accepted by the
float constructor (for example, '-2.3' or '1e10')
- strings of the form '123/456'
- float and Decimal instances
- other Rational instances (including integers)
"""
__slots__ = ('_numerator', '_denominator')
# We're immutable, so use __new__ not __init__
def __new__(cls, numerator=0, denominator=None):
"""Constructs a Fraction.
Takes a string like '3/2' or '1.5', another Rational instance, a
numerator/denominator pair, or a float.
Examples
--------
>>> Fraction(10, -8)
Fraction(-5, 4)
>>> Fraction(Fraction(1, 7), 5)
Fraction(1, 35)
>>> Fraction(Fraction(1, 7), Fraction(2, 3))
Fraction(3, 14)
>>> Fraction('314')
Fraction(314, 1)
>>> Fraction('-35/4')
Fraction(-35, 4)
>>> Fraction('3.1415') # conversion from numeric string
Fraction(6283, 2000)
>>> Fraction('-47e-2') # string may include a decimal exponent
Fraction(-47, 100)
>>> Fraction(1.47) # direct construction from float (exact conversion)
Fraction(6620291452234629, 4503599627370496)
>>> Fraction(2.25)
Fraction(9, 4)
>>> Fraction(Decimal('1.47'))
Fraction(147, 100)
"""
self = super(Fraction, cls).__new__(cls)
if denominator is None:
if isinstance(numerator, Rational):
self._numerator = numerator.numerator
self._denominator = numerator.denominator
return self
elif isinstance(numerator, float):
# Exact conversion from float
value = Fraction.from_float(numerator)
self._numerator = value._numerator
self._denominator = value._denominator
return self
elif isinstance(numerator, Decimal):
value = Fraction.from_decimal(numerator)
self._numerator = value._numerator
self._denominator = value._denominator
return self
elif isinstance(numerator, basestring):
# Handle construction from strings.
m = _RATIONAL_FORMAT.match(numerator)
if m is None:
raise ValueError('Invalid literal for Fraction: %r' %
numerator)
numerator = int(m.group('num') or '0')
denom = m.group('denom')
if denom:
denominator = int(denom)
else:
denominator = 1
decimal = m.group('decimal')
if decimal:
scale = 10**len(decimal)
numerator = numerator * scale + int(decimal)
denominator *= scale
exp = m.group('exp')
if exp:
exp = int(exp)
if exp >= 0:
numerator *= 10**exp
else:
denominator *= 10**-exp
if m.group('sign') == '-':
numerator = -numerator
else:
raise TypeError("argument should be a string "
"or a Rational instance")
elif (isinstance(numerator, Rational) and
isinstance(denominator, Rational)):
numerator, denominator = (
numerator.numerator * denominator.denominator,
denominator.numerator * numerator.denominator
)
else:
raise TypeError("both arguments should be "
"Rational instances")
if denominator == 0:
raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
g = gcd(numerator, denominator)
self._numerator = numerator // g
self._denominator = denominator // g
return self
@classmethod
def from_float(cls, f):
"""Converts a finite float to a rational number, exactly.
Beware that Fraction.from_float(0.3) != Fraction(3, 10).
"""
if isinstance(f, numbers.Integral):
return cls(f)
elif not isinstance(f, float):
raise TypeError("%s.from_float() only takes floats, not %r (%s)" %
(cls.__name__, f, type(f).__name__))
if math.isnan(f) or math.isinf(f):
raise TypeError("Cannot convert %r to %s." % (f, cls.__name__))
return cls(*f.as_integer_ratio())
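    # Illustrative example (not in the original file): because 0.3 is not exactly
    # representable as a binary float,
    #   Fraction.from_float(0.3) == Fraction(5404319552844595, 18014398509481984)
    # rather than Fraction(3, 10).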
@classmethod
def from_decimal(cls, dec):
"""Converts a finite Decimal instance to a rational number, exactly."""
from decimal import Decimal
if isinstance(dec, numbers.Integral):
dec = Decimal(int(dec))
elif not isinstance(dec, Decimal):
raise TypeError(
"%s.from_decimal() only takes Decimals, not %r (%s)" %
(cls.__name__, dec, type(dec).__name__))
if not dec.is_finite():
# Catches infinities and nans.
raise TypeError("Cannot convert %s to %s." % (dec, cls.__name__))
sign, digits, exp = dec.as_tuple()
digits = int(''.join(map(str, digits)))
if sign:
digits = -digits
if exp >= 0:
return cls(digits * 10 ** exp)
else:
return cls(digits, 10 ** -exp)
def limit_denominator(self, max_denominator=1000000):
"""Closest Fraction to self with denominator at most max_denominator.
>>> Fraction('3.141592653589793').limit_denominator(10)
Fraction(22, 7)
>>> Fraction('3.141592653589793').limit_denominator(100)
Fraction(311, 99)
>>> Fraction(4321, 8765).limit_denominator(10000)
Fraction(4321, 8765)
"""
# Algorithm notes: For any real number x, define a *best upper
# approximation* to x to be a rational number p/q such that:
#
# (1) p/q >= x, and
# (2) if p/q > r/s >= x then s > q, for any rational r/s.
#
# Define *best lower approximation* similarly. Then it can be
# proved that a rational number is a best upper or lower
# approximation to x if, and only if, it is a convergent or
# semiconvergent of the (unique shortest) continued fraction
# associated to x.
#
# To find a best rational approximation with denominator <= M,
# we find the best upper and lower approximations with
# denominator <= M and take whichever of these is closer to x.
# In the event of a tie, the bound with smaller denominator is
# chosen. If both denominators are equal (which can happen
# only when max_denominator == 1 and self is midway between
# two integers) the lower bound---i.e., the floor of self, is
# taken.
if max_denominator < 1:
raise ValueError("max_denominator should be at least 1")
if self._denominator <= max_denominator:
return Fraction(self)
p0, q0, p1, q1 = 0, 1, 1, 0
n, d = self._numerator, self._denominator
while True:
a = n//d
q2 = q0+a*q1
if q2 > max_denominator:
break
p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
n, d = d, n-a*d
k = (max_denominator-q0)//q1
bound1 = Fraction(p0+k*p1, q0+k*q1)
bound2 = Fraction(p1, q1)
if abs(bound2 - self) <= abs(bound1-self):
return bound2
else:
return bound1
@property
def numerator(a):
return a._numerator
@property
def denominator(a):
return a._denominator
def __repr__(self):
"""repr(self)"""
return ('Fraction(%s, %s)' % (self._numerator, self._denominator))
def __str__(self):
"""str(self)"""
if self._denominator == 1:
return str(self._numerator)
else:
return '%s/%s' % (self._numerator, self._denominator)
def _operator_fallbacks(monomorphic_operator, fallback_operator):
"""Generates forward and reverse operators given a purely-rational
operator and a function from the operator module.
Use this like:
__op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op)
In general, we want to implement the arithmetic operations so
that mixed-mode operations either call an implementation whose
author knew about the types of both arguments, or convert both
to the nearest built in type and do the operation there. In
Fraction, that means that we define __add__ and __radd__ as:
def __add__(self, other):
# Both types have numerators/denominator attributes,
# so do the operation directly
if isinstance(other, (int, long, Fraction)):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
# float and complex don't have those operations, but we
# know about those types, so special case them.
elif isinstance(other, float):
return float(self) + other
elif isinstance(other, complex):
return complex(self) + other
# Let the other type take over.
return NotImplemented
def __radd__(self, other):
# radd handles more types than add because there's
# nothing left to fall back to.
if isinstance(other, Rational):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
elif isinstance(other, Real):
return float(other) + float(self)
elif isinstance(other, Complex):
return complex(other) + complex(self)
return NotImplemented
There are 5 different cases for a mixed-type addition on
Fraction. I'll refer to all of the above code that doesn't
refer to Fraction, float, or complex as "boilerplate". 'r'
will be an instance of Fraction, which is a subtype of
Rational (r : Fraction <: Rational), and b : B <:
Complex. The first three involve 'r + b':
1. If B <: Fraction, int, float, or complex, we handle
that specially, and all is well.
2. If Fraction falls back to the boilerplate code, and it
were to return a value from __add__, we'd miss the
possibility that B defines a more intelligent __radd__,
so the boilerplate should return NotImplemented from
__add__. In particular, we don't handle Rational
here, even though we could get an exact answer, in case
the other type wants to do something special.
3. If B <: Fraction, Python tries B.__radd__ before
Fraction.__add__. This is ok, because it was
implemented with knowledge of Fraction, so it can
handle those instances before delegating to Real or
Complex.
The next two situations describe 'b + r'. We assume that b
didn't know about Fraction in its implementation, and that it
uses similar boilerplate code:
        4. If B <: Rational, then __radd__ converts both to the
builtin rational type (hey look, that's us) and
proceeds.
5. Otherwise, __radd__ tries to find the nearest common
base ABC, and fall back to its builtin type. Since this
class doesn't subclass a concrete type, there's no
implementation to fall back to, so we need to try as
hard as possible to return an actual value, or the user
will get a TypeError.
"""
def forward(a, b):
if isinstance(b, (int, long, Fraction)):
return monomorphic_operator(a, b)
elif isinstance(b, float):
return fallback_operator(float(a), b)
elif isinstance(b, complex):
return fallback_operator(complex(a), b)
else:
return NotImplemented
forward.__name__ = '__' + fallback_operator.__name__ + '__'
forward.__doc__ = monomorphic_operator.__doc__
def reverse(b, a):
if isinstance(a, Rational):
# Includes ints.
return monomorphic_operator(a, b)
elif isinstance(a, numbers.Real):
return fallback_operator(float(a), float(b))
elif isinstance(a, numbers.Complex):
return fallback_operator(complex(a), complex(b))
else:
return NotImplemented
reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
reverse.__doc__ = monomorphic_operator.__doc__
return forward, reverse
def _add(a, b):
"""a + b"""
return Fraction(a.numerator * b.denominator +
b.numerator * a.denominator,
a.denominator * b.denominator)
__add__, __radd__ = _operator_fallbacks(_add, operator.add)
def _sub(a, b):
"""a - b"""
return Fraction(a.numerator * b.denominator -
b.numerator * a.denominator,
a.denominator * b.denominator)
__sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)
def _mul(a, b):
"""a * b"""
return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)
__mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)
def _div(a, b):
"""a / b"""
return Fraction(a.numerator * b.denominator,
a.denominator * b.numerator)
__truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
__div__, __rdiv__ = _operator_fallbacks(_div, operator.div)
def __floordiv__(a, b):
"""a // b"""
# Will be math.floor(a / b) in 3.0.
div = a / b
if isinstance(div, Rational):
# trunc(math.floor(div)) doesn't work if the rational is
# more precise than a float because the intermediate
# rounding may cross an integer boundary.
return div.numerator // div.denominator
else:
return math.floor(div)
def __rfloordiv__(b, a):
"""a // b"""
# Will be math.floor(a / b) in 3.0.
div = a / b
if isinstance(div, Rational):
# trunc(math.floor(div)) doesn't work if the rational is
# more precise than a float because the intermediate
# rounding may cross an integer boundary.
return div.numerator // div.denominator
else:
return math.floor(div)
def __mod__(a, b):
"""a % b"""
div = a // b
return a - b * div
def __rmod__(b, a):
"""a % b"""
div = a // b
return a - b * div
def __pow__(a, b):
"""a ** b
If b is not an integer, the result will be a float or complex
since roots are generally irrational. If b is an integer, the
result will be rational.
"""
if isinstance(b, Rational):
if b.denominator == 1:
power = b.numerator
if power >= 0:
return Fraction(a._numerator ** power,
a._denominator ** power)
else:
return Fraction(a._denominator ** -power,
a._numerator ** -power)
else:
# A fractional power will generally produce an
# irrational number.
return float(a) ** float(b)
else:
return float(a) ** b
def __rpow__(b, a):
"""a ** b"""
if b._denominator == 1 and b._numerator >= 0:
# If a is an int, keep it that way if possible.
return a ** b._numerator
if isinstance(a, Rational):
return Fraction(a.numerator, a.denominator) ** b
if b._denominator == 1:
return a ** b._numerator
return a ** float(b)
def __pos__(a):
"""+a: Coerces a subclass instance to Fraction"""
return Fraction(a._numerator, a._denominator)
def __neg__(a):
"""-a"""
return Fraction(-a._numerator, a._denominator)
def __abs__(a):
"""abs(a)"""
return Fraction(abs(a._numerator), a._denominator)
def __trunc__(a):
"""trunc(a)"""
if a._numerator < 0:
return -(-a._numerator // a._denominator)
else:
return a._numerator // a._denominator
def __hash__(self):
"""hash(self)
Tricky because values that are exactly representable as a
float must have the same hash as that float.
"""
# XXX since this method is expensive, consider caching the result
if self._denominator == 1:
# Get integers right.
return hash(self._numerator)
# Expensive check, but definitely correct.
if self == float(self):
return hash(float(self))
else:
# Use tuple's hash to avoid a high collision rate on
# simple fractions.
return hash((self._numerator, self._denominator))
def __eq__(a, b):
"""a == b"""
if isinstance(b, Rational):
return (a._numerator == b.numerator and
a._denominator == b.denominator)
if isinstance(b, numbers.Complex) and b.imag == 0:
b = b.real
if isinstance(b, float):
if math.isnan(b) or math.isinf(b):
# comparisons with an infinity or nan should behave in
# the same way for any finite a, so treat a as zero.
return 0.0 == b
else:
return a == a.from_float(b)
else:
# Since a doesn't know how to compare with b, let's give b
# a chance to compare itself with a.
return NotImplemented
def _richcmp(self, other, op):
"""Helper for comparison operators, for internal use only.
Implement comparison between a Rational instance `self`, and
either another Rational instance or a float `other`. If
`other` is not a Rational instance or a float, return
NotImplemented. `op` should be one of the six standard
comparison operators.
"""
# convert other to a Rational instance where reasonable.
if isinstance(other, Rational):
return op(self._numerator * other.denominator,
self._denominator * other.numerator)
# comparisons with complex should raise a TypeError, for consistency
# with int<->complex, float<->complex, and complex<->complex comparisons.
if isinstance(other, complex):
raise TypeError("no ordering relation is defined for complex numbers")
if isinstance(other, float):
if math.isnan(other) or math.isinf(other):
return op(0.0, other)
else:
return op(self, self.from_float(other))
else:
return NotImplemented
def __lt__(a, b):
"""a < b"""
return a._richcmp(b, operator.lt)
def __gt__(a, b):
"""a > b"""
return a._richcmp(b, operator.gt)
def __le__(a, b):
"""a <= b"""
return a._richcmp(b, operator.le)
def __ge__(a, b):
"""a >= b"""
return a._richcmp(b, operator.ge)
def __nonzero__(a):
"""a != 0"""
return a._numerator != 0
# support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) == Fraction:
return self # I'm immutable; therefore I am my own clone
return self.__class__(self._numerator, self._denominator)
def __deepcopy__(self, memo):
if type(self) == Fraction:
return self # My components are also immutable
return self.__class__(self._numerator, self._denominator)
|
gpl-3.0
|
vanessajurtz/lasagne4bio
|
peptide_MHCII/scripts/seq2bl.py
|
1
|
7560
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import subprocess
import sys
import os
import numpy as np
from scipy.io import netcdf
##########################################################################
# FUNCTIONS
##########################################################################
def read_blosum(filename):
'''
read in BLOSUM matrix
parameters:
- filename : file containing BLOSUM matrix
returns:
        - blosum : dictionary AA -> blosum encoding (as array)
'''
# read BLOSUM matrix:
blosumfile = open(filename, "r")
blosum = {}
B_idx = []
J_idx = []
Z_idx = []
star_idx = []
for l in blosumfile:
l = l.strip()
if l[0] == '#':
l = l.strip("#")
l = l.split(" ")
l = list(filter(None, l))
if l[0] == "A":
try:
B_idx = l.index('B')
except:
B_idx = 99
try:
J_idx = l.index('J')
except:
J_idx = 99
try:
Z_idx = l.index('Z')
except:
Z_idx = 99
star_idx = l.index('*')
else:
l = l.split(" ")
l = list(filter(None, l))
aa = str(l[0])
if (aa != 'B') & (aa != 'J') & (aa != 'Z') & (aa != '*'):
tmp = l[1:len(l)]
# tmp = [float(i) for i in tmp]
# get rid of BJZ*:
tmp2 = []
for i in range(0, len(tmp)):
if (i != B_idx) & (i != J_idx) & (i != Z_idx) & (i != star_idx):
tmp2.append(0.1*float(tmp[i])) # divide by 10
#save in BLOSUM matrix
blosum[aa]=tmp2
blosumfile.close()
return(blosum)
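# Illustrative note (not part of the original script), assuming a standard
# NCBI-style BLOSUM62 file: the returned dict maps each residue letter to its
# score row divided by 10, with the B, J, Z and * columns dropped, e.g.
#   blosum['A'][0] == 0.4   # the A-vs-A BLOSUM62 score of 4, scaled by 0.1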
def read_MHC_pseudo_seq(filename):
'''
read in MHC pseudo sequence
parameters:
- filename : file containing MHC pseudo sequences
returns:
        - mhc : dictionary mhc -> AA sequence (as string)
- mhc_seq_len : number of AA in mhc pseudo sequence
'''
# read MHC pseudo sequence:
mhcfile=open(filename, "r")
mhc={}
mhc_seq_len=None
for l in mhcfile:
l=l.strip()
tmp=l
l=tmp.split(" ")
if len(l) < 2:
l=tmp.split("\t")
l=list(filter(None, l))
mhc[l[0]]=l[1]
if mhc_seq_len == None:
mhc_seq_len = len(l[1])
mhcfile.close()
return mhc, mhc_seq_len
def pep2netcdf(filename, peptides, peplength, mhcseqs, mhclength, targets):
'''
save peptide, MHC and target data as NetCDF file
parameters:
- filename : file to store data in
- peptides : np.ndarray containing encoded peptide sequences
- peplength : np.ndarray containing length of each peptide
- mhcseqs : np.ndarray containing encoded MHC pseudo sequences
- mhclength : np.ndarray containing length of each MHC sequence
- targets : np.ndarray containing targets (log transformed IC 50 values)
'''
# open file:
f=netcdf.netcdf_file(filename, 'w')
# save targets:
f.createDimension('target', len(targets))
target=f.createVariable('target', 'f', ('target',))
target[:]=targets
# save sequence lengths:
f.createDimension('peplen', len(peplength))
peplen=f.createVariable('peplen', 'i', ('peplen',))
peplen[:]=peplength
# save peptides:
f.createDimension('n', peptides.shape[0])
f.createDimension('s', peptides.shape[1])
peptide=f.createVariable('peptide', 'f', ('n', 's'))
peptide[:][:]=peptides
# save sequence lengths:
f.createDimension('mhclen', len(mhclength))
mhclen=f.createVariable('mhclen', 'i', ('mhclen',))
mhclen[:]=mhclength
# save peptides:
f.createDimension('m', mhcseqs.shape[0])
f.createDimension('l', mhcseqs.shape[1])
mhc=f.createVariable('mhc', 'f', ('m', 'l'))
mhc[:][:]=mhcseqs
# close file:
f.close()
##########################################################################
# PARSE COMMANDLINE OPTIONS
##########################################################################
parser=argparse.ArgumentParser()
parser.add_argument('-i', '--infile', help="AA peptide + affinity data")
parser.add_argument('-o', '--outfile', help="outputfile")
parser.add_argument('-m', '--mhc', help="MHC pseudo sequences")
parser.add_argument('-b', '--blosum', help="file with BLOSUM matrix")
args=parser.parse_args()
# open input file:
if args.infile != None:
infile=open(args.infile, "r")
else:
sys.stderr.write("Please specify inputfile!\n")
sys.exit(2)
# open output file:
if args.outfile != None:
outfilename=args.outfile
else:
sys.stderr.write("Please specify outputfile!\n")
sys.exit(2)
# open file with MHC pseudo sequences:
if args.mhc != None:
mhcfilename=args.mhc
else:
sys.stderr.write("Please specify file with MHC pseudo sequences!\n")
sys.exit(2)
# open file with BLOSUM matrix:
if args.blosum != None:
blosumfilename=args.blosum
else:
sys.stderr.write("Please specify file with BLOSUM matrix!\n")
sys.exit(2)
##########################################################################
# READ AND SAVE BLOSUM MATRIX AND MHC PSEUDO SEQUENCE
##########################################################################
blosum = read_blosum(blosumfilename)
mhc,mhc_seq_len = read_MHC_pseudo_seq (mhcfilename)
##########################################################################
# ENCODE DATA:
##########################################################################
# get dimensions:
enclen=21
n_pep=0
n_pep_aa=0
pep_seqs=[]
mhc_seqs=[]
tmp_targets=[]
for l in infile:
l=list(filter(None, l.strip().split()))
# #exclude peptides longer than 20 AA
# if len(l[0]) <= 20:
n_pep += 1
n_pep_aa += len(l[0])
pep_seqs.append(l[0])
mhc_seqs.append(mhc[l[2]])
tmp_targets.append(l[1])
infile.close()
# initialize variables:
peplength=np.zeros((n_pep), dtype=int)
mhclength=np.zeros((n_pep), dtype=int)
peptides=np.zeros((n_pep_aa, enclen), dtype=float)
mhcseqs=np.zeros((n_pep * mhc_seq_len, enclen), dtype=float)
targets=np.zeros((n_pep), dtype=float)
pep_pos=0
mhc_pos=0
# save encoded sequences:
for i in range(0,n_pep):
#if (str(l[2]) == "DRB1_0101") & (len(l[0]) == 15) & (count<200):
#print l
pep_seq=pep_seqs[i]
mhc_seq=mhc_seqs[i]
# save peptide and MHC seq length:
peplength[i] = len(pep_seq)
mhclength[i] = len(mhc_seq)
# encode peptide and save:
for a in pep_seq:
if a in blosum:
peptides[pep_pos] = np.array(blosum[a])
pep_pos +=1
else:
sys.stderr.write("Unknown amino acid in peptides, encoding aborted!\n")
sys.exit(2)
# encode MHC pseudo sequence and save:
for a in mhc_seq:
if a in blosum:
mhcseqs[mhc_pos] = np.array(blosum[a])
mhc_pos += 1
else:
sys.stderr.write("Unknown amino acid in MHC, encoding aborted!\n")
sys.exit(2)
# save target value (log transformed binding affinity):
targets[i] = tmp_targets[i]
print("peptides.shape:")
print(peptides.shape)
print("mhcseqs.shape:")
print(mhcseqs.shape)
print("targets.shape:")
print(targets.shape)
# save encoded data as netCDF file:
pep2netcdf(outfilename, peptides, peplength, mhcseqs, mhclength, targets)
|
gpl-3.0
|
wonder-sk/QGIS
|
python/plugins/processing/algs/lidar/lastools/lasnoise.py
|
5
|
4112
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
lasnoise.py
---------------------
Date : September 2013 and May 2016
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from future import standard_library
standard_library.install_aliases()
from builtins import str
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from .LAStoolsUtils import LAStoolsUtils
from .LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
class lasnoise(LAStoolsAlgorithm):
ISOLATED = "ISOLATED"
STEP_XY = "STEP_XY"
STEP_Z = "STEP_Z"
OPERATION = "OPERATION"
OPERATIONS = ["classify", "remove"]
CLASSIFY_AS = "CLASSIFY_AS"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('lasnoise')
self.group, self.i18n_group = self.trAlgorithm('LAStools')
self.addParametersVerboseGUI()
self.addParametersPointInputGUI()
self.addParametersIgnoreClass1GUI()
self.addParametersIgnoreClass2GUI()
self.addParameter(ParameterNumber(lasnoise.ISOLATED,
self.tr("isolated if surrounding cells have only"), 0, None, 5))
self.addParameter(ParameterNumber(lasnoise.STEP_XY,
self.tr("resolution of isolation grid in xy"), 0, None, 4.0))
self.addParameter(ParameterNumber(lasnoise.STEP_Z,
self.tr("resolution of isolation grid in z"), 0, None, 4.0))
self.addParameter(ParameterSelection(lasnoise.OPERATION,
self.tr("what to do with isolated points"), lasnoise.OPERATIONS, 0))
self.addParameter(ParameterNumber(lasnoise.CLASSIFY_AS,
self.tr("classify as"), 0, None, 7))
self.addParametersPointOutputGUI()
self.addParametersAdditionalGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasnoise")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
self.addParametersIgnoreClass1Commands(commands)
self.addParametersIgnoreClass2Commands(commands)
isolated = self.getParameterValue(lasnoise.ISOLATED)
commands.append("-isolated")
commands.append(str(isolated))
step_xy = self.getParameterValue(lasnoise.STEP_XY)
commands.append("-step_xy")
commands.append(str(step_xy))
step_z = self.getParameterValue(lasnoise.STEP_Z)
commands.append("-step_z")
commands.append(str(step_z))
operation = self.getParameterValue(lasnoise.OPERATION)
if operation != 0:
commands.append("-remove_noise")
else:
commands.append("-classify_as")
classify_as = self.getParameterValue(lasnoise.CLASSIFY_AS)
commands.append(str(classify_as))
self.addParametersPointOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
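# Illustrative command assembled for the default parameter values above (the
# verbosity, input and output flags come from the shared LAStoolsAlgorithm
# helpers, so the exact arguments depend on the GUI settings):
#   lasnoise -i input.laz -isolated 5 -step_xy 4.0 -step_z 4.0 -classify_as 7 -o output.laz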
|
gpl-2.0
|
nickofbh/kort2
|
app/startup/init_app.py
|
1
|
1559
|
from flask.ext.security import Security, SQLAlchemyUserDatastore
from flask.ext.social import Social, SQLAlchemyConnectionDatastore
from logging.handlers import SMTPHandler
import logging
def init_error_logger_with_email_handler(app):
"""
Initialize a logger to send emails on error-level messages.
Unhandled exceptions will now send an email message to app.config.ADMINS.
"""
if app.debug: return # Do not send error emails while developing
# Retrieve email settings from app.config
host = app.config['MAIL_SERVER']
port = app.config['MAIL_PORT']
from_addr = app.config['MAIL_DEFAULT_SENDER']
username = app.config['MAIL_USERNAME']
password = app.config['MAIL_PASSWORD']
secure = () if app.config.get('MAIL_USE_TLS') else None
# Retrieve app settings from app.config
to_addr_list = app.config['ADMINS']
subject = app.config.get('APP_SYSTEM_ERROR_SUBJECT_LINE', 'System Error')
# Setup an SMTP mail handler for error-level messages
mail_handler = SMTPHandler(
mailhost=(host, port), # Mail host and port
fromaddr=from_addr, # From address
toaddrs=to_addr_list, # To address
subject=subject, # Subject line
credentials=(username, password), # Credentials
secure=secure,
)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
# Log errors using: app.logger.error('Some error message')
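# Minimal usage sketch (hypothetical config values for an existing Flask app;
# the key names match the lookups above):
#
#   app.config.update(
#       MAIL_SERVER='smtp.example.com', MAIL_PORT=587,
#       MAIL_DEFAULT_SENDER='noreply@example.com', MAIL_USERNAME='user',
#       MAIL_PASSWORD='secret', MAIL_USE_TLS=True,
#       ADMINS=['admin@example.com'],
#   )
#   init_error_logger_with_email_handler(app)
#   app.logger.error('Some error message')  # emailed to ADMINS when not in debug mode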
|
mit
|
t-tran/libcloud
|
libcloud/common/buddyns.py
|
24
|
2400
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.base import ConnectionKey, JsonResponse
__all__ = [
'API_HOST',
'BuddyNSException',
'BuddyNSResponse',
'BuddyNSConnection'
]
# Endpoint for the BuddyNS API
API_HOST = 'www.buddyns.com'
class BuddyNSResponse(JsonResponse):
errors = []
objects = []
def __init__(self, response, connection):
super(BuddyNSResponse, self).__init__(response=response,
connection=connection)
self.errors, self.objects = self.parse_body_and_errors()
if not self.success():
raise BuddyNSException(code=self.status,
message=self.errors.pop()['detail'])
def parse_body_and_errors(self):
js = super(BuddyNSResponse, self).parse_body()
if 'detail' in js:
self.errors.append(js)
else:
self.objects.append(js)
return self.errors, self.objects
def success(self):
return len(self.errors) == 0
class BuddyNSConnection(ConnectionKey):
host = API_HOST
responseCls = BuddyNSResponse
def add_default_headers(self, headers):
headers['content-type'] = 'application/json'
headers['Authorization'] = 'Token' + ' ' + self.key
return headers
class BuddyNSException(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
self.args = (code, message)
def __str__(self):
return "%s %s" % (self.code, self.message)
def __repr__(self):
return "BuddyNSException %s %s" % (self.code, self.message)
|
apache-2.0
|
tobykurien/MakerDroid
|
assetsrc/pycam.mp3/src/pycam/Exporters/GCodeExporter.py
|
1
|
13911
|
# -*- coding: utf-8 -*-
"""
$Id: GCodeExporter.py 1092 2011-06-13 14:40:56Z sumpfralle $
Copyright 2010-2011 Lars Kruse <[email protected]>
Copyright 2008-2009 Lode Leroy
This file is part of PyCAM.
PyCAM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyCAM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyCAM. If not, see <http://www.gnu.org/licenses/>.
"""
import decimal
import os
DEFAULT_HEADER = ("G40 (disable tool radius compensation)",
"G49 (disable tool length compensation)",
"G80 (cancel modal motion)",
"G54 (select coordinate system 1)",
"G90 (disable incremental moves)")
PATH_MODES = {"exact_path": 0, "exact_stop": 1, "continuous": 2}
MAX_DIGITS = 12
def _get_num_of_significant_digits(number):
""" Determine the number of significant digits of a float number. """
# use only positive numbers
number = abs(number)
max_diff = 0.1 ** MAX_DIGITS
if number <= max_diff:
# input value is smaller than the smallest usable number
return MAX_DIGITS
elif number >= 1:
# no negative number of significant digits
return 0
else:
for digit in range(1, MAX_DIGITS):
shifted = number * (10 ** digit)
if shifted - int(shifted) < max_diff:
return digit
else:
return MAX_DIGITS
def _get_num_converter(step_width):
""" Return a float-to-decimal conversion function with a prevision suitable
for the given step width.
"""
digits = _get_num_of_significant_digits(step_width)
format_string = "%%.%df" % digits
return lambda number: decimal.Decimal(format_string % number)
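# Illustrative behaviour (values follow from the formatting logic above):
#   _get_num_of_significant_digits(0.01)  -> 2
#   _get_num_converter(0.01)(3.14159)     -> Decimal('3.14')
#   _get_num_converter(1.0)(3.14159)      -> Decimal('3')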
class GCodeGenerator:
NUM_OF_AXES = 3
def __init__(self, destination, metric_units=True, safety_height=0.0,
toggle_spindle_status=False, spindle_delay=3, header=None,
comment=None, minimum_steps=None, touch_off_on_startup=False,
touch_off_on_tool_change=False, touch_off_position=None,
touch_off_rapid_move=0, touch_off_slow_move=1,
touch_off_slow_feedrate=20, touch_off_height=0,
touch_off_pause_execution=False):
if isinstance(destination, basestring):
# open the file
self.destination = file(destination,"w")
self._close_stream_on_exit = True
else:
# assume that "destination" is something like a StringIO instance
# or an open file
self.destination = destination
# don't close the stream if we did not open it on our own
self._close_stream_on_exit = False
self.safety_height = safety_height
self.toggle_spindle_status = toggle_spindle_status
self.spindle_delay = spindle_delay
self.comment = comment
# define all axes steps and the corresponding formatters
self._axes_formatter = []
if not minimum_steps:
# default: minimum steps for all axes = 0.0001
minimum_steps = [0.0001]
for i in range(self.NUM_OF_AXES):
if i < len(minimum_steps):
step_width = minimum_steps[i]
else:
step_width = minimum_steps[-1]
conv = _get_num_converter(step_width)
self._axes_formatter.append((conv(step_width), conv))
self._finished = False
if comment:
self.add_comment(comment)
if header is None:
self.append(DEFAULT_HEADER)
else:
self.append(header)
if metric_units:
self.append("G21 (metric)")
else:
self.append("G20 (imperial)")
self.last_position = [None, None, None]
self.last_rapid = None
self.last_tool_id = None
self.last_feedrate = 100
if touch_off_on_startup or touch_off_on_tool_change:
self.store_touch_off_position(touch_off_position)
self.touch_off_on_startup = touch_off_on_startup
self.touch_off_on_tool_change = touch_off_on_tool_change
self.touch_off_rapid_move = touch_off_rapid_move
self.touch_off_slow_move = touch_off_slow_move
self.touch_off_slow_feedrate = touch_off_slow_feedrate
self.touch_off_pause_execution = touch_off_pause_execution
self.touch_off_height = touch_off_height
self._on_startup = True
def run_touch_off(self, new_tool_id=None, force_height=None):
# either "new_tool_id" or "force_height" should be specified
self.append("")
self.append("(Start of touch off operation)")
self.append("G90 (disable incremental moves)")
self.append("G49 (disable tool offset compensation)")
self.append("G53 G0 Z#5163 (go to touch off position: z)")
self.append("G28 (go to final touch off position)")
self.append("G91 (enter incremental mode)")
self.append("F%f (reduce feed rate during touch off)" % self.touch_off_slow_feedrate)
if self.touch_off_pause_execution:
self.append("(msg,Pausing before tool change)")
self.append("M0 (pause before touch off)")
# measure the current tool length
if self.touch_off_rapid_move > 0:
self.append("G0 Z-%f (go down rapidly)" % self.touch_off_rapid_move)
self.append("G38.2 Z-%f (do the touch off)" % self.touch_off_slow_move)
if not force_height is None:
self.append("G92 Z%f" % force_height)
self.append("G28 (go up again)")
if not new_tool_id is None:
# compensate the length of the new tool
self.append("#100=#5063 (store current tool length compensation)")
self.append("T%d M6" % new_tool_id)
if self.touch_off_rapid_move > 0:
self.append("G0 Z-%f (go down rapidly)" % self.touch_off_rapid_move)
self.append("G38.2 Z-%f (do the touch off)" % self.touch_off_slow_move)
self.append("G28 (go up again)")
# compensate the tool length difference
self.append("G43.1 Z[#5063-#100] (compensate the new tool length)")
self.append("F%f (restore feed rate)" % self.last_feedrate)
self.append("G90 (disable incremental mode)")
# Move up to a safe height. This is either "safety height" or the touch
# off start location. The highest value of these two is used.
if self.touch_off_on_startup and not self.touch_off_height is None:
touch_off_safety_height = self.touch_off_height + \
self.touch_off_slow_move + self.touch_off_rapid_move
final_height = max(touch_off_safety_height, self.safety_height)
self.append("G0 Z%.3f" % final_height)
else:
# We assume that the touch off start position is _above_ the
# top of the material. This is documented.
# A proper ("safer") implementation would compare "safety_height"
# with the touch off start location. But this requires "O"-Codes
# which are only usable for EMC2 (probably).
self.append("G53 G0 Z#5163 (go to touch off position: z)")
if self.touch_off_pause_execution:
self.append("(msg,Pausing after tool change)")
self.append("M0 (pause after touch off)")
self.append("(End of touch off operation)")
self.append("")
def store_touch_off_position(self, position):
if position is None:
self.append("G28.1 (store current position for touch off)")
else:
self.append("#5161=%f (touch off position: x)" % position.x)
self.append("#5162=%f (touch off position: y)" % position.y)
self.append("#5163=%f (touch off position: z)" % position.z)
def set_speed(self, feedrate=None, spindle_speed=None):
if not feedrate is None:
self.append("F%.5f" % feedrate)
self.last_feedrate = feedrate
if not spindle_speed is None:
self.append("S%.5f" % spindle_speed)
def set_path_mode(self, mode, motion_tolerance=None,
naive_cam_tolerance=None):
result = ""
if mode == PATH_MODES["exact_path"]:
result = "G61 (exact path mode)"
elif mode == PATH_MODES["exact_stop"]:
result = "G61.1 (exact stop mode)"
elif mode == PATH_MODES["continuous"]:
if motion_tolerance is None:
result = "G64 (continuous mode with maximum speed)"
elif naive_cam_tolerance is None:
result = "G64 P%f (continuous mode with tolerance)" \
% motion_tolerance
else:
result = ("G64 P%f Q%f (continuous mode with tolerance and " \
+ "cleanup)") % (motion_tolerance, naive_cam_tolerance)
else:
raise ValueError("GCodeGenerator: invalid path mode (%s)" \
% str(mode))
self.append(result)
def add_moves(self, moves, tool_id=None, comment=None):
if not comment is None:
self.add_comment(comment)
skip_safety_height_move = False
if not tool_id is None:
if self.last_tool_id == tool_id:
# nothing to be done
pass
elif self.touch_off_on_tool_change and \
not (self.last_tool_id is None):
self.run_touch_off(new_tool_id=tool_id)
skip_safety_height_move = True
else:
self.append("T%d M6" % tool_id)
if self._on_startup and self.touch_off_on_startup:
self.run_touch_off(force_height=self.touch_off_height)
skip_safety_height_move = True
self._on_startup = False
self.last_tool_id = tool_id
# move straight up to safety height
if not skip_safety_height_move:
self.add_move_to_safety()
self.set_spindle_status(True)
for pos, rapid in moves:
self.add_move(pos, rapid=rapid)
# go back to safety height
self.add_move_to_safety()
self.set_spindle_status(False)
# make sure that all sections are independent of each other
self.last_position = [None, None, None]
self.last_rapid = None
def set_spindle_status(self, status):
if self.toggle_spindle_status:
if status:
self.append("M3 (start spindle)")
else:
self.append("M5 (stop spindle)")
self.append("G04 P%d (wait for %d seconds)" % (self.spindle_delay,
self.spindle_delay))
def add_move_to_safety(self):
new_pos = [None, None, self.safety_height]
self.add_move(new_pos, rapid=True)
def add_move(self, position, rapid=False):
""" add the GCode for a machine move to 'position'. Use rapid (G0) or
normal (G01) speed.
@value position: the new position
@type position: Point or list(float)
@value rapid: is this a rapid move?
@type rapid: bool
"""
new_pos = []
for index, attr in enumerate("xyz"):
conv = self._axes_formatter[index][1]
if hasattr(position, attr):
value = getattr(position, attr)
else:
value = position[index]
if value is None:
new_pos.append(None)
else:
new_pos.append(conv(value))
# check if there was a significant move
no_diff = True
for index in range(len(new_pos)):
if new_pos[index] is None:
continue
if self.last_position[index] is None:
no_diff = False
break
diff = abs(new_pos[index] - self.last_position[index])
if diff >= self._axes_formatter[index][0]:
no_diff = False
break
if no_diff:
# we can safely skip this move
return
# compose the position string
pos_string = []
for index, axis_spec in enumerate("XYZ"):
if new_pos[index] is None:
continue
if not self.last_position or \
(new_pos[index] != self.last_position[index]):
pos_string.append("%s%s" % (axis_spec, new_pos[index]))
self.last_position[index] = new_pos[index]
if rapid == self.last_rapid:
prefix = ""
elif rapid:
prefix = "G0"
else:
prefix = "G1"
self.last_rapid = rapid
self.append("%s %s" % (prefix, " ".join(pos_string)))
def finish(self):
self.add_move_to_safety()
self.append("M2 (end program)")
self._finished = True
def add_comment(self, comment):
if isinstance(comment, basestring):
lines = comment.split(os.linesep)
else:
lines = comment
for line in lines:
self.append(";%s" % line)
def append(self, command):
if self._finished:
raise TypeError("GCodeGenerator: can't add further commands to a " \
+ "finished GCodeGenerator instance: %s" % str(command))
if isinstance(command, basestring):
command = [command]
for line in command:
self.destination.write(line + os.linesep)
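# Illustrative usage sketch (Python 2, matching the basestring/file checks above;
# the destination and parameter values are hypothetical):
#
#   from StringIO import StringIO
#   out = StringIO()
#   gen = GCodeGenerator(out, metric_units=True, safety_height=25.0)
#   gen.set_speed(feedrate=300)
#   gen.add_moves([((0, 0, 5), True), ((10, 0, -1), False)], tool_id=1)
#   gen.finish()
#   print out.getvalue()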
|
gpl-3.0
|
detrout/pykolab
|
pykolab/setup/setup_imap.py
|
1
|
7351
|
# -*- coding: utf-8 -*-
# Copyright 2010-2012 Kolab Systems AG (http://www.kolabsys.com)
#
# Jeroen van Meeuwen (Kolab Systems) <vanmeeuwen a kolabsys.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 or, at your option, any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
from augeas import Augeas
from Cheetah.Template import Template
import os
import subprocess
import components
import pykolab
from pykolab import utils
from pykolab.constants import *
from pykolab.translate import _
log = pykolab.getLogger('pykolab.setup')
conf = pykolab.getConf()
def __init__():
components.register(
'imap',
execute,
description=description(),
after=['ldap']
)
def description():
return _("Setup IMAP.")
def execute(*args, **kw):
"""
Apply the necessary settings to /etc/imapd.conf and /etc/cyrus.conf
"""
imapd_settings = {
"ldap_servers": conf.get('ldap', 'ldap_uri'),
"ldap_base": conf.get('ldap', 'base_dn'),
"ldap_bind_dn": conf.get('ldap', 'service_bind_dn'),
"ldap_password": conf.get('ldap', 'service_bind_pw'),
"ldap_filter": '(|(&(|(uid=%s)(uid=cyrus-murder))(uid=%%U))(&(|(uid=%%U)(mail=%%U@%%d)(mail=%%U@%%r))(objectclass=kolabinetorgperson)))' % (conf.get('cyrus-imap', 'admin_login')),
"ldap_user_attribute": conf.get('cyrus-sasl', 'result_attribute'),
"ldap_group_base": conf.get('ldap', 'base_dn'),
"ldap_group_filter": "(&(cn=%u)(objectclass=ldapsubentry)(objectclass=nsroledefinition))",
"ldap_group_scope": "one",
"ldap_member_base": conf.get('ldap','user_base_dn'),
"ldap_member_method": "attribute",
"ldap_member_attribute": "nsrole",
"admins": conf.get('cyrus-imap', 'admin_login'),
"postuser": "shared",
}
template_file = None
if os.path.isfile('/etc/kolab/templates/imapd.conf.tpl'):
template_file = '/etc/kolab/templates/imapd.conf.tpl'
elif os.path.isfile('/usr/share/kolab/templates/imapd.conf.tpl'):
template_file = '/usr/share/kolab/templates/imapd.conf.tpl'
elif os.path.isfile(os.path.abspath(os.path.join(__file__, '..', '..', '..', 'share', 'templates', 'imapd.conf.tpl'))):
template_file = os.path.abspath(os.path.join(__file__, '..', '..', '..', 'share', 'templates', 'imapd.conf.tpl'))
if not template_file == None:
fp = open(template_file, 'r')
template_definition = fp.read()
fp.close()
t = Template(template_definition, searchList=[imapd_settings])
fp = open('/etc/imapd.conf', 'w')
fp.write(t.__str__())
fp.close()
else:
log.error(_("Could not write out Cyrus IMAP configuration file /etc/imapd.conf"))
return
cyrus_settings = {}
template_file = None
if os.path.isfile('/etc/kolab/templates/cyrus.conf.tpl'):
template_file = '/etc/kolab/templates/cyrus.conf.tpl'
elif os.path.isfile('/usr/share/kolab/templates/cyrus.conf.tpl'):
template_file = '/usr/share/kolab/templates/cyrus.conf.tpl'
elif os.path.isfile(os.path.abspath(os.path.join(__file__, '..', '..', '..', 'share', 'templates', 'cyrus.conf.tpl'))):
template_file = os.path.abspath(os.path.join(__file__, '..', '..', '..', 'share', 'templates', 'cyrus.conf.tpl'))
if not template_file == None:
fp = open(template_file, 'r')
template_definition = fp.read()
fp.close()
t = Template(template_definition, searchList=[cyrus_settings])
fp = open('/etc/cyrus.conf', 'w')
fp.write(t.__str__())
fp.close()
else:
log.error(_("Could not write out Cyrus IMAP configuration file /etc/imapd.conf"))
return
annotations = [
"/vendor/horde/share-params,mailbox,string,backend,value.shared value.priv,a",
"/vendor/kolab/color,mailbox,string,backend,value.shared value.priv,a",
"/vendor/kolab/folder-test,mailbox,string,backend,value.shared value.priv,a",
"/vendor/kolab/folder-type,mailbox,string,backend,value.shared value.priv,a",
"/vendor/kolab/incidences-for,mailbox,string,backend,value.shared value.priv,a",
"/vendor/kolab/pxfb-readable-for,mailbox,string,backend,value.shared value.priv,a",
"/vendor/kolab/h-share-attr-desc,mailbox,string,backend,value.shared value.priv,a",
"/vendor/kolab/activesync,mailbox,string,backend,value.priv,r",
"/vendor/x-toltec/test,mailbox,string,backend,value.shared value.priv,a",
]
fp = open('/etc/imapd.annotations.conf', 'w')
fp.write("\n".join(annotations))
fp.close()
if os.path.isfile('/etc/default/kolab-saslauthd'):
myaugeas = Augeas()
setting = os.path.join('/files/etc/default/kolab-saslauthd','START')
if not myaugeas.get(setting) == 'yes':
myaugeas.set(setting,'yes')
myaugeas.save()
myaugeas.close()
if os.path.isfile('/bin/systemctl'):
subprocess.call(['systemctl', 'stop', 'saslauthd.service'])
subprocess.call(['systemctl', 'restart', 'kolab-saslauthd.service'])
subprocess.call(['systemctl', 'restart', 'cyrus-imapd.service'])
elif os.path.isfile('/sbin/service'):
subprocess.call(['service', 'saslauthd', 'stop'])
subprocess.call(['service', 'kolab-saslauthd', 'restart'])
subprocess.call(['service', 'cyrus-imapd', 'restart'])
elif os.path.isfile('/usr/sbin/service'):
subprocess.call(['/usr/sbin/service','saslauthd','stop'])
subprocess.call(['/usr/sbin/service','kolab-saslauthd','restart'])
subprocess.call(['/usr/sbin/service','cyrus-imapd','restart'])
else:
log.error(_("Could not start the cyrus-imapd and kolab-saslauthd services."))
if os.path.isfile('/bin/systemctl'):
subprocess.call(['systemctl', 'disable', 'saslauthd.service'])
subprocess.call(['systemctl', 'enable', 'kolab-saslauthd.service'])
subprocess.call(['systemctl', 'enable', 'cyrus-imapd.service'])
elif os.path.isfile('/sbin/chkconfig'):
subprocess.call(['chkconfig', 'saslauthd', 'off'])
subprocess.call(['chkconfig', 'kolab-saslauthd', 'on'])
subprocess.call(['chkconfig', 'cyrus-imapd', 'on'])
elif os.path.isfile('/usr/sbin/update-rc.d'):
subprocess.call(['/usr/sbin/update-rc.d', 'saslauthd', 'disable'])
subprocess.call(['/usr/sbin/update-rc.d', 'kolab-saslauthd', 'defaults'])
subprocess.call(['/usr/sbin/update-rc.d', 'cyrus-imapd', 'defaults'])
else:
log.error(_("Could not configure to start on boot, the " + \
"cyrus-imapd and kolab-saslauthd services."))
|
gpl-3.0
|
hoosteeno/mozillians
|
vendor-local/lib/python/rest_framework/tests/test_serializer_bulk_update.py
|
21
|
8866
|
"""
Tests to cover bulk create and update using serializers.
"""
from __future__ import unicode_literals
from django.test import TestCase
from rest_framework import serializers
class BulkCreateSerializerTests(TestCase):
"""
Creating multiple instances using serializers.
"""
def setUp(self):
class BookSerializer(serializers.Serializer):
id = serializers.IntegerField()
title = serializers.CharField(max_length=100)
author = serializers.CharField(max_length=100)
self.BookSerializer = BookSerializer
def test_bulk_create_success(self):
"""
Correct bulk create serialization should return the input data.
"""
data = [
{
'id': 0,
'title': 'The electric kool-aid acid test',
'author': 'Tom Wolfe'
}, {
'id': 1,
'title': 'If this is a man',
'author': 'Primo Levi'
}, {
'id': 2,
'title': 'The wind-up bird chronicle',
'author': 'Haruki Murakami'
}
]
serializer = self.BookSerializer(data=data, many=True)
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(serializer.object, data)
def test_bulk_create_errors(self):
"""
Incorrect bulk create serialization should return errors.
"""
data = [
{
'id': 0,
'title': 'The electric kool-aid acid test',
'author': 'Tom Wolfe'
}, {
'id': 1,
'title': 'If this is a man',
'author': 'Primo Levi'
}, {
'id': 'foo',
'title': 'The wind-up bird chronicle',
'author': 'Haruki Murakami'
}
]
expected_errors = [
{},
{},
{'id': ['Enter a whole number.']}
]
serializer = self.BookSerializer(data=data, many=True)
self.assertEqual(serializer.is_valid(), False)
self.assertEqual(serializer.errors, expected_errors)
def test_invalid_list_datatype(self):
"""
A list containing items of an incorrect data type should return errors.
"""
data = ['foo', 'bar', 'baz']
serializer = self.BookSerializer(data=data, many=True)
self.assertEqual(serializer.is_valid(), False)
expected_errors = [
{'non_field_errors': ['Invalid data']},
{'non_field_errors': ['Invalid data']},
{'non_field_errors': ['Invalid data']}
]
self.assertEqual(serializer.errors, expected_errors)
def test_invalid_single_datatype(self):
"""
Data containing a single incorrect data type should return errors.
"""
data = 123
serializer = self.BookSerializer(data=data, many=True)
self.assertEqual(serializer.is_valid(), False)
expected_errors = {'non_field_errors': ['Expected a list of items.']}
self.assertEqual(serializer.errors, expected_errors)
def test_invalid_single_object(self):
"""
Data containing only a single object, instead of a list of objects
should return errors.
"""
data = {
'id': 0,
'title': 'The electric kool-aid acid test',
'author': 'Tom Wolfe'
}
serializer = self.BookSerializer(data=data, many=True)
self.assertEqual(serializer.is_valid(), False)
expected_errors = {'non_field_errors': ['Expected a list of items.']}
self.assertEqual(serializer.errors, expected_errors)
class BulkUpdateSerializerTests(TestCase):
"""
Updating multiple instances using serializers.
"""
def setUp(self):
class Book(object):
"""
A data type that can be persisted to a mock storage backend
with `.save()` and `.delete()`.
"""
object_map = {}
def __init__(self, id, title, author):
self.id = id
self.title = title
self.author = author
def save(self):
Book.object_map[self.id] = self
def delete(self):
del Book.object_map[self.id]
class BookSerializer(serializers.Serializer):
id = serializers.IntegerField()
title = serializers.CharField(max_length=100)
author = serializers.CharField(max_length=100)
def restore_object(self, attrs, instance=None):
if instance:
instance.id = attrs['id']
instance.title = attrs['title']
instance.author = attrs['author']
return instance
return Book(**attrs)
self.Book = Book
self.BookSerializer = BookSerializer
data = [
{
'id': 0,
'title': 'The electric kool-aid acid test',
'author': 'Tom Wolfe'
}, {
'id': 1,
'title': 'If this is a man',
'author': 'Primo Levi'
}, {
'id': 2,
'title': 'The wind-up bird chronicle',
'author': 'Haruki Murakami'
}
]
for item in data:
book = Book(item['id'], item['title'], item['author'])
book.save()
def books(self):
"""
Return all the objects in the mock storage backend.
"""
return self.Book.object_map.values()
def test_bulk_update_success(self):
"""
Correct bulk update serialization should return the input data.
"""
data = [
{
'id': 0,
'title': 'The electric kool-aid acid test',
'author': 'Tom Wolfe'
}, {
'id': 2,
'title': 'Kafka on the shore',
'author': 'Haruki Murakami'
}
]
serializer = self.BookSerializer(self.books(), data=data, many=True, allow_add_remove=True)
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(serializer.data, data)
serializer.save()
new_data = self.BookSerializer(self.books(), many=True).data
self.assertEqual(data, new_data)
def test_bulk_update_and_create(self):
"""
Bulk update serialization may also include created items.
"""
data = [
{
'id': 0,
'title': 'The electric kool-aid acid test',
'author': 'Tom Wolfe'
}, {
'id': 3,
'title': 'Kafka on the shore',
'author': 'Haruki Murakami'
}
]
serializer = self.BookSerializer(self.books(), data=data, many=True, allow_add_remove=True)
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(serializer.data, data)
serializer.save()
new_data = self.BookSerializer(self.books(), many=True).data
self.assertEqual(data, new_data)
def test_bulk_update_invalid_create(self):
"""
Bulk update serialization without allow_add_remove may not create items.
"""
data = [
{
'id': 0,
'title': 'The electric kool-aid acid test',
'author': 'Tom Wolfe'
}, {
'id': 3,
'title': 'Kafka on the shore',
'author': 'Haruki Murakami'
}
]
expected_errors = [
{},
{'non_field_errors': ['Cannot create a new item, only existing items may be updated.']}
]
serializer = self.BookSerializer(self.books(), data=data, many=True)
self.assertEqual(serializer.is_valid(), False)
self.assertEqual(serializer.errors, expected_errors)
def test_bulk_update_error(self):
"""
Incorrect bulk update serialization should return error data.
"""
data = [
{
'id': 0,
'title': 'The electric kool-aid acid test',
'author': 'Tom Wolfe'
}, {
'id': 'foo',
'title': 'Kafka on the shore',
'author': 'Haruki Murakami'
}
]
expected_errors = [
{},
{'id': ['Enter a whole number.']}
]
serializer = self.BookSerializer(self.books(), data=data, many=True, allow_add_remove=True)
self.assertEqual(serializer.is_valid(), False)
self.assertEqual(serializer.errors, expected_errors)
|
bsd-3-clause
|
listingmirror/boto
|
boto/kms/exceptions.py
|
135
|
1523
|
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
class InvalidGrantTokenException(BotoServerError):
pass
class DisabledException(BotoServerError):
pass
class LimitExceededException(BotoServerError):
pass
class DependencyTimeoutException(BotoServerError):
pass
class InvalidMarkerException(BotoServerError):
pass
class AlreadyExistsException(BotoServerError):
pass
class InvalidCiphertextException(BotoServerError):
pass
class KeyUnavailableException(BotoServerError):
pass
class InvalidAliasNameException(BotoServerError):
pass
class UnsupportedOperationException(BotoServerError):
pass
class InvalidArnException(BotoServerError):
pass
class KMSInternalException(BotoServerError):
pass
class InvalidKeyUsageException(BotoServerError):
pass
class MalformedPolicyDocumentException(BotoServerError):
pass
class NotFoundException(BotoServerError):
pass
|
mit
|
jtaghiyar/kronos
|
kronos/kronos.py
|
1
|
9365
|
"""
Created on Mar 10, 2014
@author: jtaghiyar
"""
import os
import subprocess as sub
from kronos_version import kronos_version
from utils import Pipeline
from helpers import make_dir, Configurer
from workflow_manager import WorkFlow, WorkFlowManager
from plumber import Plumber
from string import Template
from tempfile import NamedTemporaryFile as ntf
import logging
logging.basicConfig(format='%(asctime)s %(message)s',
level=logging.DEBUG)
class Factory(object):
"""
create, run, manage pipelines.
"""
def __init__(self, args):
"""initialize."""
self.args = args
self.pipelines = []
self.c = Configurer()
make_dir(self.args.working_dir)
def make_config(self):
"""make a yaml config file."""
file_name = os.path.join(self.args.working_dir, self.args.output_filename + '.yaml')
config_dict = Configurer.make_config_dict(self.args.components)
Configurer.print2yaml(config_dict, file_name)
def init_pipeline(self):
"""initialize a new pipeline."""
## update __SAMPLES__, __SHARED__ and __GENERAL__ sections.
self.c.config_file = self.args.config_file
if self.args.input_samples:
self.c.update_sample_section(self.args.input_samples)
if self.args.setup_file:
self.c.update_shared_section(self.args.setup_file)
## make a copy of the updated config file in the working directory.
updated_cfgfile = os.path.basename(self.args.config_file).split('.yaml')[0]
updated_cfgfile = os.path.join(self.args.working_dir, updated_cfgfile + '_kronos.yaml')
Configurer.print2yaml(self.c.config_dict, updated_cfgfile)
## create a work flow from updated config file
wf = WorkFlow(updated_cfgfile)
samples = wf.get_samples()
if not samples:
self._make_intermediate_pipeline(self.args.pipeline_name, updated_cfgfile, None)
else:
for sample_id, sample_dict in samples.iteritems():
new_config_file = self._make_intermediate_config_file(sample_id, sample_dict)
pipeline_name = sample_id + '_' + self.args.pipeline_name
self._make_intermediate_pipeline(pipeline_name, new_config_file, sample_id)
self._paste_pipelines(updated_cfgfile)
def run_pipeline(self):
"""run Kronos-made pipeline with optional initialization."""
if self.args.config_file:
if not self.args.pipeline_name:
bname = os.path.basename(self.args.config_file)
self.args.pipeline_name = os.path.splitext(bname)[0]
self.init_pipeline()
self.args.kronos_pipeline = os.path.join(self.args.working_dir,
self.args.pipeline_name + '.py')
## TODO: check if the -k input has been generated with kronos.
cmd = "{python_installation} {kronos_pipeline} -b {job_scheduler} "
cmd += "-c {components_dir} -d {drmaa_library_path} -j {num_jobs} "
cmd += "-n {num_pipelines} -p {python_installation} -w {working_dir}"
cmd = cmd.format(**vars(self.args))
if self.args.qsub_options:
cmd += " -q '%s'" % (self.args.qsub_options)
if self.args.run_id:
cmd += " -r '%s'" % (self.args.run_id)
if self.args.pipeline_name:
cmd += " -e '%s'" % (self.args.pipeline_name)
if self.args.no_prefix:
cmd += " --no_prefix"
logging.info('running the command: %s' % (cmd))
proc = sub.Popen(cmd, stdout=sub.PIPE, stderr=sub.PIPE, shell=True)
cmdout, cmderr = proc.communicate()
if cmdout:
logging.info(cmdout)
if cmderr:
logging.warning(cmderr)
def update_config(self):
old_config_file = self.args.config_files[0]
new_config_file = self.args.config_files[1]
file_name = os.path.join(self.args.working_dir,
self.args.output_filename + '.yaml')
new_config_dict = Configurer.update_config_files(old_config_file, new_config_file)
Configurer.print2yaml(new_config_dict, file_name)
def _make_intermediate_config_file(self, sample_id, sample_dict):
"""make an intermediate config file from the original config_file."""
intermediate_dir = os.path.join(self.args.working_dir, 'intermediate_config_files')
make_dir(intermediate_dir)
temp_name = os.path.splitext(os.path.basename(self.args.config_file))[0] + '_kronos'
new_file_name = os.path.join(intermediate_dir, sample_id + '_' + temp_name + '.yaml')
new_config_dict = self.c.update_config_dict(sample_dict)
Configurer.print2yaml(new_config_dict, new_file_name)
return new_file_name
def _make_intermediate_pipeline(self, pipeline_name, config_file, sample_id):
"""make an intermediate pipeline script from the intermediate config file."""
intermediate_dir = os.path.join(self.args.working_dir, 'intermediate_pipeline_scripts')
make_dir(intermediate_dir)
p = self._make_pipeline(pipeline_name, config_file, intermediate_dir, sample_id)
self.pipelines.append(p)
def _make_pipeline(self, pipeline_name, config_file, script_dir, sample_id):
p = Pipeline(pipeline_name = pipeline_name,
config_file = config_file,
script_dir = script_dir,
sample_id = sample_id)
p.make_script(sample_id)
return p
def _paste_pipelines(self, config_file):
"""merge intermediate pipelines."""
pipeline_script = os.path.join(self.args.working_dir, self.args.pipeline_name + '.py')
with open(pipeline_script, 'w') as ps:
plumber = Plumber(ps, None)
plumber.paste_pipelines(self.pipelines, config_file)
def test(self):
pycmd = self.args.python_installation
tests = list()
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(path, '../test/')
# tests.append(os.path.join(path, 'tester_io_manager.py'))
tests.append(os.path.join(path, 'tester.py'))
for test in tests:
os.system('{0} {1}'.format(pycmd, test))
def make_component(self):
output_dir = os.path.abspath(self.args.working_dir)
comp_name = self.args.component_name
comp_path = os.path.join(output_dir, comp_name)
## make the component directory
if os.path.exists(comp_path):
msg = ("There is already a component with name '{0}' "
" in the given path {1}").format(comp_name, comp_path)
raise Exception(msg)
else:
os.mkdir(comp_path)
os.mkdir(os.path.join(comp_path, 'component_seed'))
def _make_new_file_and_replace_comp_name(template_file, comp_name):
with open(template_file, 'r') as tf:
new_filename = os.path.basename(template_file)
new_file = open(os.path.join(comp_path, new_filename), 'w')
t = Template(tf.read())
new_file.write(t.substitute(COMPONENT_NAME=comp_name))
new_file.close()
package_path = os.path.dirname(os.path.realpath(__file__))
templates_path = os.path.join(package_path, '../templates')
component_ui = os.path.join(templates_path, 'component_ui.py')
component_main = os.path.join(templates_path, 'component_main.py')
component_reqs = os.path.join(templates_path, 'component_reqs.py')
component_params = os.path.join(templates_path, 'component_params.py')
_make_new_file_and_replace_comp_name(component_ui, comp_name)
_make_new_file_and_replace_comp_name(component_main, comp_name)
_make_new_file_and_replace_comp_name(component_reqs, comp_name)
_make_new_file_and_replace_comp_name(component_params, comp_name)
## create the __init__.py inside the component package
init_file = open(os.path.join(comp_path, '__init__.py'), 'w')
init_file.close()
def main():
import kronosui
args = kronosui.args
logging.info("<<< kronos_" + kronos_version + " started >>>")
factory = Factory(args)
if args.subparser_name == 'make_config':
logging.info("making a config file ...")
factory.make_config()
elif args.subparser_name == 'init':
logging.info("initializing the pipeline ...")
factory.init_pipeline()
elif args.subparser_name == 'test':
factory.test()
elif args.subparser_name == 'make_component':
logging.info("making a component")
factory.make_component()
elif args.subparser_name == 'update_config':
logging.info("updating config files ...")
factory.update_config()
elif args.subparser_name == 'run':
logging.info("running the pipeline ...")
factory.run_pipeline()
logging.info("<<< kronos_" + kronos_version + " finished >>>")
if __name__ == '__main__':
main()
|
mit
|