repo_name | path | copies | size | content | license
---|---|---|---|---|---|
haruwo/thrift-with-java-annotation-support | lib/py/src/transport/TSSLSocket.py | 55 | 8258 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import socket
import ssl
from thrift.transport import TSocket
from thrift.transport.TTransport import TTransportException
class TSSLSocket(TSocket.TSocket):
"""
SSL implementation of client-side TSocket
This class creates outbound sockets wrapped using the
python standard ssl module for encrypted connections.
The protocol used is set using the class variable
SSL_VERSION, which must be one of ssl.PROTOCOL_* and
defaults to ssl.PROTOCOL_TLSv1 for greatest security.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host='localhost',
port=9090,
validate=True,
ca_certs=None,
keyfile=None,
certfile=None,
unix_socket=None,
ciphers=None):
"""Create SSL TSocket
@param validate: Set to False to disable SSL certificate validation
@type validate: bool
@param ca_certs: Filename to the Certificate Authority pem file, possibly a
file downloaded from: http://curl.haxx.se/ca/cacert.pem This is passed to
the ssl_wrap function as the 'ca_certs' parameter.
@type ca_certs: str
@param keyfile: The private key
@type keyfile: str
@param certfile: The cert file
@type certfile: str
@param ciphers: The cipher suites to allow. This is passed to
the ssl_wrap function as the 'ciphers' parameter.
@type ciphers: str
Raises an IOError exception if validate is True and the ca_certs file is
None, not present or unreadable.
"""
self.validate = validate
self.is_valid = False
self.peercert = None
if not validate:
self.cert_reqs = ssl.CERT_NONE
else:
self.cert_reqs = ssl.CERT_REQUIRED
self.ca_certs = ca_certs
self.keyfile = keyfile
self.certfile = certfile
self.ciphers = ciphers
if validate:
if ca_certs is None or not os.access(ca_certs, os.R_OK):
raise IOError('Certificate Authority ca_certs file "%s" '
'is not readable, cannot validate SSL '
'certificates.' % (ca_certs))
TSocket.TSocket.__init__(self, host, port, unix_socket)
def open(self):
try:
res0 = self._resolveAddr()
for res in res0:
sock_family, sock_type = res[0:2]
ip_port = res[4]
plain_sock = socket.socket(sock_family, sock_type)
self.handle = ssl.wrap_socket(plain_sock,
ssl_version=self.SSL_VERSION,
do_handshake_on_connect=True,
ca_certs=self.ca_certs,
keyfile=self.keyfile,
certfile=self.certfile,
cert_reqs=self.cert_reqs,
ciphers=self.ciphers)
self.handle.settimeout(self._timeout)
try:
self.handle.connect(ip_port)
except socket.error as e:
if res is not res0[-1]:
continue
else:
raise e
break
except socket.error as e:
if self._unix_socket:
message = 'Could not connect to secure socket %s: %s' \
% (self._unix_socket, e)
else:
message = 'Could not connect to %s:%d: %s' % (self.host, self.port, e)
raise TTransportException(type=TTransportException.NOT_OPEN,
message=message)
if self.validate:
self._validate_cert()
def _validate_cert(self):
"""internal method to validate the peer's SSL certificate, and to check the
commonName of the certificate to ensure it matches the hostname we
used to make this connection. Does not support subjectAltName records
in certificates.
raises TTransportException if the certificate fails validation.
"""
cert = self.handle.getpeercert()
self.peercert = cert
if 'subject' not in cert:
raise TTransportException(
type=TTransportException.NOT_OPEN,
message='No SSL certificate found from %s:%s' % (self.host, self.port))
fields = cert['subject']
for field in fields:
# ensure structure we get back is what we expect
if not isinstance(field, tuple):
continue
cert_pair = field[0]
if len(cert_pair) < 2:
continue
cert_key, cert_value = cert_pair[0:2]
if cert_key != 'commonName':
continue
certhost = cert_value
# this check should be performed by some sort of Access Manager
if certhost == self.host:
# success, cert commonName matches desired hostname
self.is_valid = True
return
else:
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Hostname we connected to "%s" doesn\'t match certificate '
'provided commonName "%s"' % (self.host, certhost))
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Could not validate SSL certificate from '
'host "%s". Cert=%s' % (self.host, cert))
class TSSLServerSocket(TSocket.TServerSocket):
"""SSL implementation of TServerSocket
This uses the ssl module's wrap_socket() method to provide SSL
negotiated encryption.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host=None,
port=9090,
certfile='cert.pem',
unix_socket=None,
ciphers=None):
"""Initialize a TSSLServerSocket
@param certfile: filename of the server certificate, defaults to cert.pem
@type certfile: str
@param host: The hostname or IP to bind the listen socket to,
i.e. 'localhost' for only allowing local network connections.
Pass None to bind to all interfaces.
@type host: str
@param port: The port to listen on for inbound connections.
@type port: int
@param ciphers: The cipher suites to allow. This is passed to
the ssl_wrap function as the 'ciphers' parameter.
@type ciphers: str
"""
self.setCertfile(certfile)
TSocket.TServerSocket.__init__(self, host, port)
self.ciphers = ciphers
def setCertfile(self, certfile):
"""Set or change the server certificate file used to wrap new connections.
@param certfile: The filename of the server certificate,
i.e. '/etc/certs/server.pem'
@type certfile: str
Raises an IOError exception if the certfile is not present or unreadable.
"""
if not os.access(certfile, os.R_OK):
raise IOError('No such certfile found: %s' % (certfile))
self.certfile = certfile
def accept(self):
plain_client, addr = self.handle.accept()
try:
client = ssl.wrap_socket(plain_client, certfile=self.certfile,
server_side=True, ssl_version=self.SSL_VERSION,
ciphers=self.ciphers)
except ssl.SSLError as ssl_exc:
# failed handshake/ssl wrap, close socket to client
plain_client.close()
# raise ssl_exc
# We can't raise the exception, because it kills most TServer derived
# serve() methods.
# Instead, return None, and let the TServer instance deal with it in
# other exception handling. (but TSimpleServer dies anyway)
return None
result = TSocket.TSocket()
result.setHandle(client)
return result
| apache-2.0 |
ChrisCinelli/pgessays | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/universaldetector.py | 744 | 6831 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
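# Illustrative usage sketch (not part of the original module); the file name
# is a placeholder. It shows the incremental feed()/close() cycle exposed by
# UniversalDetector:
#
#   detector = UniversalDetector()
#   with open('unknown.txt', 'rb') as fp:
#       for chunk in fp:
#           detector.feed(chunk)
#           if detector.done:
#               break
#   detector.close()
#   detector.result  # e.g. {'encoding': 'UTF-8', 'confidence': 0.99}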
| bsd-2-clause |
Intel-tensorflow/tensorflow | tensorflow/lite/tools/optimize/debugging/python/debugger_test.py | 3 | 13121 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for QuantizationDebugger."""
import csv
import io
import re
from unittest import mock
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.lite.python import convert
from tensorflow.lite.python import lite
from tensorflow.lite.tools.optimize.debugging.python import debugger
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import tracking
# pylint: disable=g-import-not-at-top
try:
from tensorflow.lite.python import metrics_portable as metrics
except ImportError:
from tensorflow.lite.python import metrics_nonportable as metrics
# pylint: enable=g-import-not-at-top
def _get_model():
"""Returns somple model with Conv2D and representative dataset gen."""
root = tracking.AutoTrackable()
kernel_in = np.array([-2, -1, 1, 2], dtype=np.float32).reshape((2, 2, 1, 1))
@tf.function(
input_signature=[tf.TensorSpec(shape=[1, 3, 3, 1], dtype=tf.float32)])
def func(inp):
kernel = tf.constant(kernel_in, dtype=tf.float32)
conv = tf.nn.conv2d(inp, kernel, strides=1, padding='SAME')
output = tf.nn.relu(conv, name='output')
return output
root.f = func
to_save = root.f.get_concrete_function()
return to_save
def _calibration_gen():
for i in range(5):
yield [np.arange(9).reshape((1, 3, 3, 1)).astype(np.float32) * i]
def _convert_model(func):
"""Converts TF model to TFLite float model."""
converter = lite.TFLiteConverterV2.from_concrete_functions([func])
return converter.convert()
def _quantize_converter(func, calibration_gen, debug=True):
"""Returns a converter appropriate for the function and debug configs."""
converter = lite.TFLiteConverterV2.from_concrete_functions([func])
converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.representative_dataset = calibration_gen
# Create a TFLite model with new quantizer and numeric verify ops.
converter.optimizations = [lite.Optimize.DEFAULT]
converter.experimental_new_quantizer = True
if debug:
converter._experimental_calibrate_only = True
return converter
def _quantize_model(func, calibration_gen, quantized_io=False, debug=True):
"""Quantizes model, in debug or normal mode."""
converter = _quantize_converter(func, calibration_gen, debug)
if debug:
calibrated = converter.convert()
return convert.mlir_quantize(
calibrated, enable_numeric_verify=True, fully_quantize=quantized_io)
else:
return converter.convert()
def _dummy_fn(*unused_args):
return 0.0
class QuantizationDebugOptionsTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@test_util.run_v2_only
def test_init_duplicate_keys_raises_ValueError(self):
with self.assertRaises(ValueError):
debugger.QuantizationDebugOptions(
layer_debug_metrics={
'a': _dummy_fn,
'b': _dummy_fn
},
model_debug_metrics={
'c': _dummy_fn,
'd': _dummy_fn
},
layer_direct_compare_metrics={
'a': _dummy_fn,
'e': _dummy_fn
})
with self.assertRaises(ValueError):
debugger.QuantizationDebugOptions(
layer_debug_metrics={
'a': _dummy_fn,
'b': _dummy_fn
},
layer_direct_compare_metrics={
'a': _dummy_fn,
'e': _dummy_fn
})
class QuantizationDebuggerTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.tf_model = _get_model()
cls.float_model = _convert_model(cls.tf_model)
cls.debug_model_float = _quantize_model(
cls.tf_model, _calibration_gen, quantized_io=False)
cls.debug_model_int8 = _quantize_model(
cls.tf_model, _calibration_gen, quantized_io=True)
@parameterized.named_parameters(
('float_io', False, False),
('quantized_io', True, False),
('float_io_from_converter', False, True),
('quantized_io_from_converter', True, True),
)
@test_util.run_v2_only
def test_layer_metrics(self, quantized_io, from_converter):
options = debugger.QuantizationDebugOptions(
layer_debug_metrics={'l1_norm': lambda diffs: np.mean(np.abs(diffs))})
if not from_converter:
if quantized_io:
debug_model = QuantizationDebuggerTest.debug_model_int8
else:
debug_model = QuantizationDebuggerTest.debug_model_float
quant_debugger = debugger.QuantizationDebugger(
quant_debug_model_content=debug_model,
debug_dataset=_calibration_gen,
debug_options=options)
else:
options.fully_quantize = quantized_io
quant_debugger = debugger.QuantizationDebugger(
converter=_quantize_converter(self.tf_model, _calibration_gen),
debug_dataset=_calibration_gen,
debug_options=options)
quant_debugger.run()
expected_metrics = {
'num_elements': 9,
'stddev': 0.03850026,
'mean_error': 0.01673192,
'max_abs_error': 0.10039272,
'mean_squared_error': 0.0027558778,
'l1_norm': 0.023704167,
}
self.assertLen(quant_debugger.layer_statistics, 1)
actual_metrics = next(iter(quant_debugger.layer_statistics.values()))
self.assertCountEqual(expected_metrics.keys(), actual_metrics.keys())
for key, value in expected_metrics.items():
self.assertAlmostEqual(value, actual_metrics[key], places=5)
buffer = io.StringIO()
quant_debugger.layer_statistics_dump(buffer)
reader = csv.DictReader(buffer.getvalue().split())
actual_values = next(iter(reader))
expected_values = expected_metrics.copy()
expected_values.update({
'op_name': 'CONV_2D',
'tensor_idx': 7 if quantized_io else 8,
'scale': 0.15686275,
'zero_point': -128,
'tensor_name': r'Identity[1-9]?$'
})
for key, value in expected_values.items():
if isinstance(value, str):
self.assertIsNotNone(
re.match(value, actual_values[key]),
'String is different from expected string. Please fix test code if'
" it's being affected by graph manipulation changes.")
elif isinstance(value, list):
self.assertAlmostEqual(
value[0], float(actual_values[key][1:-1]), places=5)
else:
self.assertAlmostEqual(value, float(actual_values[key]), places=5)
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True),
)
@test_util.run_v2_only
def test_model_metrics(self, quantized_io):
if quantized_io:
debug_model = QuantizationDebuggerTest.debug_model_int8
else:
debug_model = QuantizationDebuggerTest.debug_model_float
options = debugger.QuantizationDebugOptions(
model_debug_metrics={'stdev': lambda x, y: np.std(x[0] - y[0])})
quant_debugger = debugger.QuantizationDebugger(
quant_debug_model_content=debug_model,
float_model_content=QuantizationDebuggerTest.float_model,
debug_dataset=_calibration_gen,
debug_options=options)
quant_debugger.run()
expected_metrics = {'stdev': 0.050998904}
actual_metrics = quant_debugger.model_statistics
self.assertCountEqual(expected_metrics.keys(), actual_metrics.keys())
for key, value in expected_metrics.items():
self.assertAlmostEqual(value, actual_metrics[key], places=5)
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True),
)
@test_util.run_v2_only
def test_layer_direct_compare_metrics(self, quantized_io):
def _corr(float_values, quant_values, scale, zero_point):
dequant_values = (quant_values.astype(np.int32) - zero_point) * scale
return np.corrcoef(float_values.flatten(), dequant_values.flatten())[0, 1]
if quantized_io:
debug_model = QuantizationDebuggerTest.debug_model_int8
else:
debug_model = QuantizationDebuggerTest.debug_model_float
options = debugger.QuantizationDebugOptions(
layer_direct_compare_metrics={'corr': _corr})
quant_debugger = debugger.QuantizationDebugger(
quant_debug_model_content=debug_model,
debug_dataset=_calibration_gen,
debug_options=options)
quant_debugger.run()
expected_metrics = {
'corr': 0.99999,
}
self.assertLen(quant_debugger.layer_statistics, 1)
actual_metrics = next(iter(quant_debugger.layer_statistics.values()))
for key, value in expected_metrics.items():
self.assertAlmostEqual(value, actual_metrics[key], places=5)
@test_util.run_v2_only
def test_wrong_input_raises_ValueError(self):
def wrong_calibration_gen():
for _ in range(5):
yield [
np.ones((1, 3, 3, 1), dtype=np.float32),
np.ones((1, 3, 3, 1), dtype=np.float32)
]
quant_debugger = debugger.QuantizationDebugger(
quant_debug_model_content=QuantizationDebuggerTest.debug_model_float,
debug_dataset=wrong_calibration_gen)
with self.assertRaisesRegex(
ValueError, r'inputs provided \(2\).+inputs to the model \(1\)'):
quant_debugger.run()
@test_util.run_v2_only
def test_non_debug_model_raises_ValueError(self):
normal_quant_model = _quantize_model(
QuantizationDebuggerTest.tf_model, _calibration_gen, debug=False)
with self.assertRaisesRegex(
ValueError, 'Please check if the quantized model is in debug mode'):
debugger.QuantizationDebugger(
quant_debug_model_content=normal_quant_model,
debug_dataset=_calibration_gen)
@parameterized.named_parameters(
('empty quantization parameter', {
'quantization_parameters': {}
}, None),
('empty scales/zero points', {
'quantization_parameters': {
'scales': [],
'zero_points': []
}
}, None),
('invalid scales/zero points', {
'quantization_parameters': {
'scales': [1.0],
'zero_points': []
}
}, None),
('correct case', {
'quantization_parameters': {
'scales': [0.5, 1.0],
'zero_points': [42, 7]
}
}, (0.5, 42)),
)
def test_get_quant_params(self, tensor_detail, expected_value):
self.assertEqual(debugger._get_quant_params(tensor_detail), expected_value)
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True))
@test_util.run_v2_only
def test_denylisted_ops(self, quantized_io):
options = debugger.QuantizationDebugOptions(
layer_debug_metrics={'l1_norm': lambda diffs: np.mean(np.abs(diffs))})
options.fully_quantize = quantized_io
quant_debugger = debugger.QuantizationDebugger(
converter=_quantize_converter(self.tf_model, _calibration_gen),
debug_dataset=_calibration_gen,
debug_options=options)
options.denylisted_ops = ['CONV_2D']
with self.assertRaisesRegex(
ValueError, 'Please check if the quantized model is in debug mode'):
quant_debugger.options = options
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True))
@test_util.run_v2_only
def test_denylisted_nodes(self, quantized_io):
options = debugger.QuantizationDebugOptions(
layer_debug_metrics={'l1_norm': lambda diffs: np.mean(np.abs(diffs))})
options.fully_quantize = quantized_io
quant_debugger = debugger.QuantizationDebugger(
converter=_quantize_converter(self.tf_model, _calibration_gen),
debug_dataset=_calibration_gen,
debug_options=options)
options.denylisted_nodes = ['Identity']
with self.assertRaisesRegex(
ValueError, 'Please check if the quantized model is in debug mode'):
quant_debugger.options = options
@mock.patch.object(metrics.TFLiteMetrics,
'increase_counter_debugger_creation')
def test_creation_counter(self, increase_call):
debug_model = QuantizationDebuggerTest.debug_model_float
debugger.QuantizationDebugger(
quant_debug_model_content=debug_model, debug_dataset=_calibration_gen)
increase_call.assert_called_once()
if __name__ == '__main__':
test.main()
| apache-2.0 |
oscar810429/mysql-5.6_facebook | xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/ib_rsync_test.py | 24 | 5247 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [['--innodb-file-per-table']]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
class basicTest(mysqlBaseTestCase):
def setUp(self):
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, '_xtrabackup')
# remove backup path
if os.path.exists(backup_path):
shutil.rmtree(backup_path)
def test_ib_incremental(self):
self.servers = servers
logging = test_executor.logging
if servers[0].type not in ['mysql','percona']:
return
else:
innobackupex = test_executor.system_manager.innobackupex_path
xtrabackup = test_executor.system_manager.xtrabackup_path
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, '_xtrabackup')
output_path = os.path.join(master_server.vardir, 'innobackupex.out')
exec_path = os.path.dirname(innobackupex)
table_name = "`test`"
# populate our server with a test bed
test_cmd = "./gentest.pl --gendata=conf/percona/percona.zz"
retcode, output = self.execute_randgen(test_cmd, test_executor, master_server)
# gather some original values for comparison
show_tables_query = "SHOW TABLES IN test"
retcode, show_tables_result = self.execute_query(show_tables_query, master_server)
self.assertEqual(retcode, 0, msg = show_tables_result)
# take a backup
cmd = [ innobackupex
, "--defaults-file=%s" %master_server.cnf_file
, "--user=root"
, "--port=%d" %master_server.master_port
, "--host=127.0.0.1"
, "--no-timestamp"
, "--ibbackup=%s" %xtrabackup
, backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
# shutdown our server
master_server.stop()
# prepare our backup
cmd = [ innobackupex
, "--apply-log"
, "--no-timestamp"
, "--use-memory=500M"
, "--ibbackup=%s" %xtrabackup
, backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
# remove old datadir
shutil.rmtree(master_server.datadir)
os.mkdir(master_server.datadir)
# restore from backup
cmd = ("%s --defaults-file=%s --copy-back"
" --ibbackup=%s %s" %( innobackupex
, master_server.cnf_file
, xtrabackup
, backup_path))
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0, output)
# restart server (and ensure it doesn't crash)
master_server.start()
self.assertTrue(master_server.status==1, 'Server failed restart from restored datadir...')
# gather some original values for comparison
show_tables_query = "SHOW TABLES IN test"
retcode, restored_show_tables_result = self.execute_query(show_tables_query, master_server)
self.assertEqual(retcode, 0, msg = restored_show_tables_result)
self.assertEqual(show_tables_result, restored_show_tables_result, msg = ("%s || %s" %(show_tables_result, restored_show_tables_result)))
query = "SELECT COUNT(*) FROM DD"
retcode, result = self.execute_query(query, master_server)
self.assertEqual(retcode,0,msg = result)
expected_result = ((100L,),)
self.assertEqual(result, expected_result, msg = "%s || %s" %(expected_result, result))
| gpl-2.0 |
endorphinl/horizon-fork | horizon/test/tests/workflows.py | 38 | 11000 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import forms
from django import http
from horizon import exceptions
from horizon.test import helpers as test
from horizon import workflows
PROJECT_ID = "a23lkjre389fwenj"
INSTANCE_ID = "sdlkjhf9832roiw"
def local_callback_func(request, context):
return "one"
def other_callback_func(request, context):
return "two"
def extra_callback_func(request, context):
return "extra"
class TestActionOne(workflows.Action):
project_id = forms.ChoiceField(label="Project")
user_id = forms.ChoiceField(label="User")
class Meta(object):
name = "Test Action One"
slug = "test_action_one"
def populate_project_id_choices(self, request, context):
return [(PROJECT_ID, "test_project")]
def populate_user_id_choices(self, request, context):
return [(request.user.id, request.user.username)]
def handle(self, request, context):
return {"foo": "bar"}
class TestActionTwo(workflows.Action):
instance_id = forms.CharField(label="Instance")
class Meta(object):
name = "Test Action Two"
slug = "test_action_two"
class TestActionThree(workflows.Action):
extra = forms.CharField(widget=forms.widgets.Textarea)
class Meta(object):
name = "Test Action Three"
slug = "test_action_three"
class AdminAction(workflows.Action):
admin_id = forms.CharField(label="Admin")
class Meta(object):
name = "Admin Action"
slug = "admin_action"
permissions = ("horizon.test",)
class TestStepOne(workflows.Step):
action_class = TestActionOne
contributes = ("project_id", "user_id")
class TestStepTwo(workflows.Step):
action_class = TestActionTwo
depends_on = ("project_id",)
contributes = ("instance_id",)
connections = {"project_id":
(local_callback_func,
"horizon.test.tests.workflows.other_callback_func")}
class TestExtraStep(workflows.Step):
action_class = TestActionThree
depends_on = ("project_id",)
contributes = ("extra_data",)
connections = {"project_id": (extra_callback_func,)}
after = TestStepOne
before = TestStepTwo
class AdminStep(workflows.Step):
action_class = AdminAction
contributes = ("admin_id",)
after = TestStepOne
before = TestStepTwo
class TestWorkflow(workflows.Workflow):
slug = "test_workflow"
default_steps = (TestStepOne, TestStepTwo)
class TestWorkflowView(workflows.WorkflowView):
workflow_class = TestWorkflow
template_name = "workflow.html"
class TestFullscreenWorkflow(workflows.Workflow):
slug = 'test_fullscreen_workflow'
default_steps = (TestStepOne, TestStepTwo)
fullscreen = True
class TestFullscreenWorkflowView(workflows.WorkflowView):
workflow_class = TestFullscreenWorkflow
template_name = "workflow.html"
class WorkflowsTests(test.TestCase):
def setUp(self):
super(WorkflowsTests, self).setUp()
def tearDown(self):
super(WorkflowsTests, self).tearDown()
self._reset_workflow()
def _reset_workflow(self):
TestWorkflow._cls_registry = set([])
def test_workflow_construction(self):
TestWorkflow.register(TestExtraStep)
flow = TestWorkflow(self.request)
self.assertQuerysetEqual(flow.steps,
['<TestStepOne: test_action_one>',
'<TestExtraStep: test_action_three>',
'<TestStepTwo: test_action_two>'])
self.assertEqual(set(['project_id']), flow.depends_on)
def test_step_construction(self):
step_one = TestStepOne(TestWorkflow(self.request))
# Action slug is moved from Meta by metaclass, and
# Step inherits slug from action.
self.assertEqual(TestActionOne.name, step_one.name)
self.assertEqual(TestActionOne.slug, step_one.slug)
# Handlers should be empty since there are no connections.
self.assertEqual(step_one._handlers, {})
step_two = TestStepTwo(TestWorkflow(self.request))
# Handlers should be populated since we do have connections.
self.assertEqual([local_callback_func, other_callback_func],
step_two._handlers["project_id"])
def test_step_invalid_connections_handlers_not_list_or_tuple(self):
class InvalidStepA(TestStepTwo):
connections = {'project_id': {}}
class InvalidStepB(TestStepTwo):
connections = {'project_id': ''}
with self.assertRaises(TypeError):
InvalidStepA(TestWorkflow(self.request))
with self.assertRaises(TypeError):
InvalidStepB(TestWorkflow(self.request))
def test_step_invalid_connection_handler_not_string_or_callable(self):
class InvalidStepA(TestStepTwo):
connections = {'project_id': (None,)}
class InvalidStepB(TestStepTwo):
connections = {'project_id': (0,)}
with self.assertRaises(TypeError):
InvalidStepA(TestWorkflow(self.request))
with self.assertRaises(TypeError):
InvalidStepB(TestWorkflow(self.request))
def test_step_invalid_callback(self):
# This should raise an exception
class InvalidStep(TestStepTwo):
connections = {"project_id": ('local_callback_func',)}
with self.assertRaises(ValueError):
InvalidStep(TestWorkflow(self.request))
def test_connection_handlers_called(self):
TestWorkflow.register(TestExtraStep)
flow = TestWorkflow(self.request)
# This should set the value without any errors, but trigger nothing
flow.context['does_not_exist'] = False
self.assertEqual(False, flow.context['does_not_exist'])
# The order here is relevant. Note that we inserted "extra" between
# steps one and two, and one has no handlers, so we should see
# a response from extra, then one from each of step two's handlers.
val = flow.context.set('project_id', PROJECT_ID)
self.assertEqual([('test_action_three', 'extra'),
('test_action_two', 'one'),
('test_action_two', 'two')], val)
def test_workflow_validation(self):
flow = TestWorkflow(self.request)
# Missing items fail validation.
with self.assertRaises(exceptions.WorkflowValidationError):
flow.is_valid()
# All required items pass validation.
seed = {"project_id": PROJECT_ID,
"user_id": self.user.id,
"instance_id": INSTANCE_ID}
req = self.factory.post("/", seed)
req.user = self.user
flow = TestWorkflow(req, context_seed={"project_id": PROJECT_ID})
for step in flow.steps:
if not step.action.is_valid():
self.fail("Step %s was unexpectedly invalid: %s"
% (step.slug, step.action.errors))
self.assertTrue(flow.is_valid())
# Additional items shouldn't affect validation
flow.context.set("extra_data", "foo")
self.assertTrue(flow.is_valid())
def test_workflow_finalization(self):
flow = TestWorkflow(self.request)
self.assertTrue(flow.finalize())
def test_workflow_view(self):
view = TestWorkflowView.as_view()
req = self.factory.get("/")
res = view(req)
self.assertEqual(200, res.status_code)
def test_workflow_registration(self):
req = self.factory.get("/foo")
flow = TestWorkflow(req)
self.assertQuerysetEqual(flow.steps,
['<TestStepOne: test_action_one>',
'<TestStepTwo: test_action_two>'])
TestWorkflow.register(TestExtraStep)
flow = TestWorkflow(req)
self.assertQuerysetEqual(flow.steps,
['<TestStepOne: test_action_one>',
'<TestExtraStep: test_action_three>',
'<TestStepTwo: test_action_two>'])
def test_workflow_render(self):
TestWorkflow.register(TestExtraStep)
req = self.factory.get("/foo")
flow = TestWorkflow(req)
output = http.HttpResponse(flow.render())
self.assertContains(output, unicode(flow.name))
self.assertContains(output, unicode(TestActionOne.name))
self.assertContains(output, unicode(TestActionTwo.name))
self.assertContains(output, unicode(TestActionThree.name))
def test_has_permissions(self):
self.assertQuerysetEqual(TestWorkflow._cls_registry, [])
TestWorkflow.register(AdminStep)
flow = TestWorkflow(self.request)
step = AdminStep(flow)
self.assertItemsEqual(step.permissions,
("horizon.test",))
self.assertQuerysetEqual(flow.steps,
['<TestStepOne: test_action_one>',
'<TestStepTwo: test_action_two>'])
self.set_permissions(['test'])
self.request.user = self.user
flow = TestWorkflow(self.request)
self.assertQuerysetEqual(flow.steps,
['<TestStepOne: test_action_one>',
'<AdminStep: admin_action>',
'<TestStepTwo: test_action_two>'])
def test_entry_point(self):
req = self.factory.get("/foo")
flow = TestWorkflow(req)
self.assertEqual("test_action_one", flow.get_entry_point())
flow = TestWorkflow(req, entry_point="test_action_two")
self.assertEqual("test_action_two", flow.get_entry_point())
def test_fullscreenworkflow_view(self):
view = TestFullscreenWorkflowView.as_view()
req = self.factory.get("/")
req.is_ajax = lambda: True
res = view(req)
output = res.render()
self.assertRegexpMatches(str(output),
'class="[^"]*\\bfullscreen\\b[^"]*"')
def test_notfullscreenworkflow_view(self):
view = TestWorkflowView.as_view()
req = self.factory.get("/")
req.is_ajax = lambda: True
res = view(req)
output = res.render()
self.assertNotRegexpMatches(str(output),
'class="[^"]*\\bfullscreen\\b[^"]*"')
| apache-2.0 |
Kazade/NeHe-Website | google_appengine/bulkloader.py | 22 | 2907 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting an appengine tool."""
import os
import sys
sys_path = sys.path
try:
sys.path = [os.path.dirname(__file__)] + sys.path
import wrapper_util
finally:
sys.path = sys_path
wrapper_util.reject_old_python_versions((2, 5))
def get_dir_path(sibling):
"""Get a path to the directory of this script.
By default, the canonical path (symlinks resolved) will be returned. In some
environments the canonical directory is not sufficient because different
parts of the SDK are referenced by symlinks, including this very module's
file. In this case, the non-canonical path to this file's directory will be
returned (i.e., the directory where the symlink lives, not the directory
where it points).
Args:
sibling: Relative path to a sibling of this module file. Choose a sibling
that is potentially symlinked into the parent directory.
Returns:
A directory name.
Raises:
ValueError: If no proper path could be determined.
"""
return wrapper_util.get_dir_path(__file__, sibling)
DIR_PATH = get_dir_path(os.path.join('lib', 'ipaddr'))
_PATHS = wrapper_util.Paths(DIR_PATH)
SCRIPT_DIR = _PATHS.default_script_dir
GOOGLE_SQL_DIR = _PATHS.google_sql_dir
EXTRA_PATHS = _PATHS.v1_extra_paths
API_SERVER_EXTRA_PATHS = _PATHS.api_server_extra_paths
ENDPOINTSCFG_EXTRA_PATHS = _PATHS.endpointscfg_extra_paths
OAUTH_CLIENT_EXTRA_PATHS = _PATHS.oauth_client_extra_paths
GOOGLE_SQL_EXTRA_PATHS = _PATHS.google_sql_extra_paths
def fix_sys_path(extra_extra_paths=()):
"""Fix the sys.path to include our extra paths."""
sys.path = EXTRA_PATHS + list(extra_extra_paths) + sys.path
def run_file(file_path, globals_):
"""Execute the given script with the passed-in globals.
Args:
file_path: the path to the wrapper for the given script. This will usually
be a copy of this file.
globals_: the global bindings to be used while executing the wrapped script.
"""
script_name = os.path.basename(file_path)
sys.path = (_PATHS.script_paths(script_name) +
_PATHS.scrub_path(script_name, sys.path))
if 'google' in sys.modules:
del sys.modules['google']
execfile(_PATHS.script_file(script_name), globals_)
if __name__ == '__main__':
run_file(__file__, globals())
| bsd-3-clause |
alephdata/aleph | aleph/logic/entitysets.py | 1 | 3835 | import logging
from aleph.core import cache
from aleph.model import EntitySet, EntitySetItem, Events
from aleph.logic.entities import upsert_entity, refresh_entity
from aleph.logic.collections import index_aggregator
from aleph.logic.aggregator import get_aggregator
from aleph.logic.notifications import publish
log = logging.getLogger(__name__)
def get_entityset(entityset_id):
return EntitySet.by_id(entityset_id)
def refresh_entityset(entityset_id):
cache.kv.delete(cache.object_key(EntitySet, entityset_id))
def create_entityset(collection, data, authz):
"""Create an entity set. This will create or update any entities
that already exist in the entityset and sign their IDs into the collection.
"""
old_to_new_id_map = {}
entity_ids = []
for entity in data.pop("entities", []):
old_id = entity.get("id")
new_id = upsert_entity(entity, collection, sign=True, sync=True)
old_to_new_id_map[old_id] = new_id
entity_ids.append(new_id)
layout = data.get("layout", {})
data["layout"] = replace_layout_ids(layout, old_to_new_id_map)
entityset = EntitySet.create(data, collection, authz)
for entity_id in entity_ids:
save_entityset_item(entityset, collection, entity_id)
publish(
Events.CREATE_ENTITYSET,
params={"collection": collection, "entityset": entityset},
channels=[collection, authz.role],
actor_id=authz.id,
)
return entityset
def save_entityset_item(entityset, collection, entity_id, **data):
"""Change the association between an entity and an entityset. In the case of
a profile, this may require re-indexing of the entity to update the associated
profile_id.
"""
item = EntitySetItem.save(entityset, entity_id, collection_id=collection.id, **data)
if entityset.type == EntitySet.PROFILE and entityset.collection_id == collection.id:
from aleph.logic.profiles import profile_fragments
aggregator = get_aggregator(collection)
profile_fragments(collection, aggregator, entity_id=entity_id)
index_aggregator(collection, aggregator, entity_ids=[entity_id])
refresh_entity(collection, entity_id)
collection.touch()
refresh_entityset(entityset.id)
return item
def replace_layout_ids(layout, old_to_new_id_map):
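    """Rewrite entity ids embedded in a saved diagram layout.

    Vertices, edges and groupings that reference a pre-signing entity id are
    updated to point at the new id assigned during upsert, based on the given
    old-to-new id mapping.
    """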
# Replace ids in vertices
for vtx in layout.get("vertices", []):
ent_id = vtx.get("entityId")
if ent_id in old_to_new_id_map:
new_id = old_to_new_id_map[ent_id]
vtx["entityId"] = new_id
vtx["id"] = vtx["id"].replace(ent_id, new_id)
# Replaces ids in edges
for edge in layout.get("edges", []):
for key in ("sourceId", "targetId"):
if edge[key].startswith("entity"):
old_id = edge[key].split("entity:")[-1]
if old_id in old_to_new_id_map:
new_id = old_to_new_id_map[old_id]
edge[key] = "entity:%s" % new_id
edge["id"] = edge["id"].replace(old_id, new_id)
ent_id = edge.get("entityId")
if ent_id in old_to_new_id_map:
new_id = old_to_new_id_map[ent_id]
edge["entityId"] = new_id
edge["id"] = edge["id"].replace(ent_id, new_id)
# Replace ids in groupings
for group in layout.get("groupings", []):
vertices = []
for vtx in group.get("vertices", []):
if vtx.startswith("entity"):
old_id = vtx.split("entity:")[-1]
if old_id in old_to_new_id_map:
new_id = old_to_new_id_map[old_id]
group["id"] = group["id"].replace(old_id, new_id)
vtx = "entity:%s" % new_id
vertices.append(vtx)
group["vertices"] = vertices
return layout
| mit |
JAOSP/aosp_platform_external_chromium_org | tools/json_schema_compiler/highlighters/pygments_highlighter.py | 179 | 1273 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
try:
import pygments
from pygments.lexers import CppLexer
from pygments.formatters import HtmlFormatter
PYGMENTS_IMPORTED = True
except ImportError:
print('It appears that Pygments is not installed. '
'Can be installed using easy_install Pygments or from http://pygments.org.')
PYGMENTS_IMPORTED = False
class PygmentsHighlighter(object):
def __init__(self):
if not PYGMENTS_IMPORTED:
raise ImportError('Pygments not installed')
"""Highlighter that uses the python pygments library to highlight code.
"""
def GetCSS(self, style):
formatter = HtmlFormatter(linenos=True,
style=pygments.styles.get_style_by_name(style))
return formatter.get_style_defs('.highlight')
def GetCodeElement(self, code, style):
formatter = HtmlFormatter(linenos=True,
style=pygments.styles.get_style_by_name(style))
return pygments.highlight(code, CppLexer(), formatter)
def DisplayName(self):
return 'pygments' + ('' if PYGMENTS_IMPORTED else ' (not installed)')
def GetStyles(self):
return list(pygments.styles.get_all_styles())
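# Illustrative usage sketch (not part of the original file); assumes Pygments
# is importable and uses its bundled 'default' style:
#
#   highlighter = PygmentsHighlighter()
#   css = highlighter.GetCSS('default')
#   html = highlighter.GetCodeElement('int main() { return 0; }', 'default')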
| bsd-3-clause |
MoltenMotherBoard/platform_kernel_samsung_cori | tools/perf/scripts/python/sctop.py | 895 | 1936 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import thread
import time
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf trace -s syscall-counts.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40d %10d\n" % (id, val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
mshafiq9/django | django/apps/config.py | 224 | 8284 | import os
from importlib import import_module
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils._os import upath
from django.utils.module_loading import module_has_submodule
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
"""
Class representing a Django application and its configuration.
"""
def __init__(self, app_name, app_module):
# Full Python path to the application eg. 'django.contrib.admin'.
self.name = app_name
# Root module for the application eg. <module 'django.contrib.admin'
# from 'django/contrib/admin/__init__.pyc'>.
self.module = app_module
# The following attributes could be defined at the class level in a
# subclass, hence the test-and-set pattern.
# Last component of the Python path to the application eg. 'admin'.
# This value must be unique across a Django project.
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
# Human-readable name for the application eg. "Admin".
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
# Filesystem path to the application directory eg.
# u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
# Python 2 and a str on Python 3.
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
# Module containing models eg. <module 'django.contrib.admin.models'
# from 'django/contrib/admin/models.pyc'>. Set by import_models().
# None if the application doesn't have a models module.
self.models_module = None
# Mapping of lower case model names to model classes. Initially set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python 3's _NamespacePath does not
# support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
else:
# For unknown reasons, sometimes the list returned by __path__
# contains duplicates that must be removed (#25246).
paths = list(set(paths))
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return upath(paths[0])
@classmethod
def create(cls, entry):
"""
Factory that creates an app config from an entry in INSTALLED_APPS.
"""
try:
# If import_module succeeds, entry is a path to an app module,
# which may specify an app config class with default_app_config.
# Otherwise, entry is a path to an app config class or an error.
module = import_module(entry)
except ImportError:
# Track that importing as an app module failed. If importing as an
# app config class fails too, we'll trigger the ImportError again.
module = None
mod_path, _, cls_name = entry.rpartition('.')
# Raise the original exception when entry cannot be a path to an
# app config class.
if not mod_path:
raise
else:
try:
# If this works, the app module specifies an app config class.
entry = module.default_app_config
except AttributeError:
# Otherwise, it simply uses the default app config class.
return cls(entry, module)
else:
mod_path, _, cls_name = entry.rpartition('.')
# If we're reaching this point, we must attempt to load the app config
# class located at <mod_path>.<cls_name>
mod = import_module(mod_path)
try:
cls = getattr(mod, cls_name)
except AttributeError:
if module is None:
# If importing as an app module failed, that error probably
# contains the most informative traceback. Trigger it again.
import_module(entry)
else:
raise
# Check for obvious errors. (This check prevents duck typing, but
# it could be removed if it became a problem in practice.)
if not issubclass(cls, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
try:
app_name = cls.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry)
# Ensure app_name points to a valid module.
app_module = import_module(app_name)
# Entry is a path to an app config class.
return cls(app_name, app_module)
def check_models_ready(self):
"""
Raises an exception if models haven't been imported yet.
"""
if self.models is None:
raise AppRegistryNotReady(
"Models for app '%s' haven't been imported yet." % self.label)
def get_model(self, model_name):
"""
Returns the model with the given case-insensitive model_name.
Raises LookupError if no model exists with this name.
"""
self.check_models_ready()
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
"""
Returns an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
self.check_models_ready()
for model in self.models.values():
if model._deferred and not include_deferred:
continue
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self, all_models):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
# Injected as a parameter because it gets populated when models are
# imported, which might happen before populate() imports models.
self.models = all_models
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
| bsd-3-clause |
robertwb/incubator-beam | sdks/python/apache_beam/io/gcp/bigquery.py | 4 | 86985 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BigQuery sources and sinks.
This module implements reading from and writing to BigQuery tables. It relies
on several classes exposed by the BigQuery API: TableSchema, TableFieldSchema,
TableRow, and TableCell. The default mode is to return table rows read from a
BigQuery source as dictionaries. Similarly a Write transform to a BigQuerySink
accepts PCollections of dictionaries. This is done for more convenient
programming. If desired, the native TableRow objects can be used throughout to
represent rows (use an instance of TableRowJsonCoder as a coder argument when
creating the sources or sinks respectively).
Also, for programming convenience, instances of TableReference and TableSchema
have a string representation that can be used for the corresponding arguments:
- TableReference can be a PROJECT:DATASET.TABLE or DATASET.TABLE string.
- TableSchema can be a NAME:TYPE{,NAME:TYPE}* string
(e.g. 'month:STRING,event_count:INTEGER').
The syntax supported is described here:
https://cloud.google.com/bigquery/bq-command-line-tool-quickstart
BigQuery sources can be used as main inputs or side inputs. A main input
(common case) is expected to be massive and will be split into manageable chunks
and processed in parallel. Side inputs are expected to be small and will be read
completely every time a ParDo DoFn gets executed. In the example below the
lambda function implementing the DoFn for the Map transform will get on each
call *one* row of the main table and *all* rows of the side table. The runner
may use some caching techniques to share the side inputs between calls in order
to avoid excessive reading:::
main_table = pipeline | 'VeryBig' >> beam.io.ReadFromBigQuery(...)
side_table = pipeline | 'NotBig' >> beam.io.ReadFromBigQuery(...)
results = (
main_table
| 'ProcessData' >> beam.Map(
lambda element, side_input: ..., AsList(side_table)))
There is no difference in how main and side inputs are read. What makes the
side_table a 'side input' is the AsList wrapper used when passing the table
as a parameter to the Map transform. AsList signals to the execution framework
that its input should be made available whole.
The main and side inputs are implemented differently. Reading a BigQuery table
as main input entails exporting the table to a set of GCS files (in AVRO or in
JSON format) and then processing those files.
Users may provide a query to read from rather than reading all of a BigQuery
table. If specified, the result obtained by executing the specified query will
be used as the data of the input transform.::
query_results = pipeline | beam.io.gcp.bigquery.ReadFromBigQuery(
query='SELECT year, mean_temp FROM samples.weather_stations')
When creating a BigQuery input transform, users should provide either a query
or a table. Pipeline construction will fail with a validation error if neither
or both are specified.
When reading via `ReadFromBigQuery`, values of the BYTES datatype are returned
as decoded bytes. This is because ReadFromBigQuery uses Avro exports by default.
When reading from BigQuery using `apache_beam.io.BigQuerySource`, bytes are
returned as base64-encoded bytes. To get base64-encoded bytes using
`ReadFromBigQuery`, you can use the flag `use_json_exports` to export
data as JSON, and receive base64-encoded bytes.
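For example, a sketch of reading with JSON exports (and therefore receiving
base64-encoded bytes) could look like the following; the table name used here
is purely illustrative::

  rows = pipeline | beam.io.ReadFromBigQuery(
      table='my_project:my_dataset.my_table',
      use_json_exports=True)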
ReadAllFromBigQuery
-------------------
Beam 2.27.0 introduces a new transform called `ReadAllFromBigQuery` which
allows you to define table and query reads from BigQuery at pipeline
runtime.:::
read_requests = p | beam.Create([
ReadFromBigQueryRequest(query='SELECT * FROM mydataset.mytable'),
ReadFromBigQueryRequest(table='myproject.mydataset.mytable')])
results = read_requests | ReadAllFromBigQuery()
A good application for this transform is in streaming pipelines to
refresh a side input coming from BigQuery. This would work like so:::
side_input = (
p
| 'PeriodicImpulse' >> PeriodicImpulse(
first_timestamp, last_timestamp, interval, True)
| 'MapToReadRequest' >> beam.Map(
lambda x: ReadFromBigQueryRequest(table='dataset.table'))
| beam.io.ReadAllFromBigQuery())
main_input = (
p
| 'MpImpulse' >> beam.Create(sample_main_input_elements)
|
'MapMpToTimestamped' >> beam.Map(lambda src: TimestampedValue(src, src))
| 'WindowMpInto' >> beam.WindowInto(
window.FixedWindows(main_input_windowing_interval)))
result = (
main_input
| 'ApplyCrossJoin' >> beam.FlatMap(
cross_join, rights=beam.pvalue.AsIter(side_input)))
**Note**: This transform is supported on Portable and Dataflow v2 runners.
**Note**: This transform does not currently clean up temporary datasets
created for its execution. (BEAM-11359)
Writing Data to BigQuery
========================
The `WriteToBigQuery` transform is the recommended way of writing data to
BigQuery. It supports a large set of parameters to customize how you'd like to
write to BigQuery.
Table References
----------------
This transform allows you to provide static `project`, `dataset` and `table`
parameters which point to a specific BigQuery table to be created. The `table`
parameter can also be a dynamic parameter (i.e. a callable), which receives an
element to be written to BigQuery, and returns the table that that element
should be sent to.
You may also provide a tuple of PCollectionView elements to be passed as side
inputs to your callable. For example, suppose that one wishes to send
events of different types to different tables, and the table names are
computed at pipeline runtime; one may do something like the following::
with Pipeline() as p:
elements = (p | beam.Create([
{'type': 'error', 'timestamp': '12:34:56', 'message': 'bad'},
{'type': 'user_log', 'timestamp': '12:34:59', 'query': 'flu symptom'},
]))
table_names = (p | beam.Create([
('error', 'my_project:dataset1.error_table_for_today'),
('user_log', 'my_project:dataset1.query_table_for_today'),
]))
table_names_dict = beam.pvalue.AsDict(table_names)
elements | beam.io.gcp.bigquery.WriteToBigQuery(
table=lambda row, table_dict: table_dict[row['type']],
table_side_inputs=(table_names_dict,))
In the example above, the `table_dict` argument passed to the lambda given as
the `table` parameter is the side input coming from `table_names_dict`, which
is passed as part of the `table_side_inputs` argument.
Schemas
-------
This transform also allows you to provide a static or dynamic `schema`
parameter (i.e. a callable).
If providing a callable, this should take in a table reference (as returned by
the `table` parameter), and return the corresponding schema for that table.
This makes it possible to provide different schemas for different tables::
def compute_table_name(row):
...
errors_schema = {'fields': [
{'name': 'type', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'message', 'type': 'STRING', 'mode': 'NULLABLE'}]}
queries_schema = {'fields': [
{'name': 'type', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'query', 'type': 'STRING', 'mode': 'NULLABLE'}]}
with Pipeline() as p:
elements = (p | beam.Create([
{'type': 'error', 'timestamp': '12:34:56', 'message': 'bad'},
{'type': 'user_log', 'timestamp': '12:34:59', 'query': 'flu symptom'},
]))
elements | beam.io.gcp.bigquery.WriteToBigQuery(
table=compute_table_name,
schema=lambda table: (errors_schema
if 'errors' in table
else queries_schema))
It may be the case that schemas are computed at pipeline runtime. In cases
like these, one can also provide a `schema_side_inputs` parameter, which is
a tuple of PCollectionViews to be passed to the schema callable (much like
the `table_side_inputs` parameter).
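As an illustrative sketch (reusing the hypothetical names from the example
above), a schema map provided as a side input could be consumed by the schema
callable like this::

  schemas = p | 'MakeSchemas' >> beam.Create([
      ('errors', errors_schema), ('queries', queries_schema)])
  schemas_dict = beam.pvalue.AsDict(schemas)
  elements | beam.io.gcp.bigquery.WriteToBigQuery(
      table=compute_table_name,
      schema=lambda table, schema_map: (schema_map['errors']
                                        if 'errors' in table
                                        else schema_map['queries']),
      schema_side_inputs=(schemas_dict,))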
Additional Parameters for BigQuery Tables
-----------------------------------------
This sink is able to create tables in BigQuery if they don't already exist. It
also relies on creating temporary tables when performing file loads.
The WriteToBigQuery transform creates tables using the BigQuery API by
inserting a load job (see the API reference [1]), or by inserting a new table
(see the API reference for that [2][3]).
When creating a new BigQuery table, there are a number of extra parameters
that one may need to specify. For example, clustering, partitioning, data
encoding, etc. It is possible to provide these additional parameters by
passing a Python dictionary as `additional_bq_parameters` to the transform.
As an example, to create a table that has specific partitioning, and
clustering properties, one would do the following::
additional_bq_parameters = {
'timePartitioning': {'type': 'DAY'},
'clustering': {'fields': ['country']}}
with Pipeline() as p:
elements = (p | beam.Create([
{'country': 'mexico', 'timestamp': '12:34:56', 'query': 'acapulco'},
{'country': 'canada', 'timestamp': '12:34:59', 'query': 'influenza'},
]))
elements | beam.io.gcp.bigquery.WriteToBigQuery(
table='project_name1:dataset_2.query_events_table',
additional_bq_parameters=additional_bq_parameters)
Much like the schema case, the `additional_bq_parameters` parameter can also
take a callable that receives a table reference.
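As a minimal sketch (the partitioning rule here is hypothetical), such a
callable could apply extra parameters only to selected destinations::

  def compute_additional_parameters(table_reference):
    # Hypothetical rule: partition and cluster only 'events' destinations.
    if 'events' in str(table_reference):
      return {'timePartitioning': {'type': 'DAY'},
              'clustering': {'fields': ['country']}}
    return {}

  elements | beam.io.gcp.bigquery.WriteToBigQuery(
      table='project_name1:dataset_2.query_events_table',
      additional_bq_parameters=compute_additional_parameters)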
[1] https://cloud.google.com/bigquery/docs/reference/rest/v2/Job\
#jobconfigurationload
[2] https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/insert
[3] https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
*** Short introduction to BigQuery concepts ***
Tables have rows (TableRow) and each row has cells (TableCell).
A table has a schema (TableSchema), which in turn describes the schema of each
cell (TableFieldSchema). The terms field and cell are used interchangeably.
TableSchema: Describes the schema (types and order) for values in each row.
Has one attribute, 'fields', which is a list of TableFieldSchema objects.
TableFieldSchema: Describes the schema (type, name) for one field.
Has several attributes, including 'name' and 'type'. Common values for
the type attribute are: 'STRING', 'INTEGER', 'FLOAT', 'BOOLEAN', 'NUMERIC',
'GEOGRAPHY'.
All possible values are described at:
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
TableRow: Holds all values in a table row. Has one attribute, 'f', which is a
list of TableCell instances.
TableCell: Holds the value for one cell (or field). Has one attribute,
'v', which is a JsonValue instance. This class is defined in
apitools.base.py.extra_types.py module.
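For illustration, a row with two cells could be constructed directly from
these apitools-generated classes (rows are usually handled as dictionaries,
so this is rarely needed)::

  from apache_beam.internal.gcp.json_value import to_json_value
  from apache_beam.io.gcp.internal.clients import bigquery

  row = bigquery.TableRow(f=[
      bigquery.TableCell(v=to_json_value('Cape Town')),
      bigquery.TableCell(v=to_json_value(42))])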
As of Beam 2.7.0, the NUMERIC data type is supported. This data type supports
high-precision decimal numbers (precision of 38 digits, scale of 9 digits).
The GEOGRAPHY data type works with Well-Known Text (See
https://en.wikipedia.org/wiki/Well-known_text) format for reading and writing
to BigQuery.
BigQuery IO requires values of BYTES datatype to be encoded using base64
encoding when writing to BigQuery.
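As a minimal sketch (the field names and the ``raw_image_bytes`` variable are
hypothetical), a BYTES value can be prepared for writing like this::

  import base64

  row = {'filename': 'icon.png', 'payload': base64.b64encode(raw_image_bytes)}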
"""
# pytype: skip-file
import collections
import itertools
import json
import logging
import random
import time
import uuid
from typing import Dict
from typing import Union
import apache_beam as beam
from apache_beam import coders
from apache_beam import pvalue
from apache_beam.internal.gcp.json_value import from_json_value
from apache_beam.internal.gcp.json_value import to_json_value
from apache_beam.io.avroio import _create_avro_source as create_avro_source
from apache_beam.io.filesystems import CompressionTypes
from apache_beam.io.filesystems import FileSystems
from apache_beam.io.gcp import bigquery_tools
from apache_beam.io.gcp.bigquery_io_metadata import create_bigquery_io_metadata
from apache_beam.io.gcp.bigquery_read_internal import _BigQueryReadSplit
from apache_beam.io.gcp.bigquery_read_internal import _JsonToDictCoder
from apache_beam.io.gcp.bigquery_read_internal import _PassThroughThenCleanup
from apache_beam.io.gcp.bigquery_read_internal import bigquery_export_destination_uri
from apache_beam.io.gcp.bigquery_tools import RetryStrategy
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.io.iobase import BoundedSource
from apache_beam.io.iobase import RangeTracker
from apache_beam.io.iobase import SDFBoundedSourceReader
from apache_beam.io.iobase import SourceBundle
from apache_beam.io.textio import _TextSource as TextSource
from apache_beam.metrics import Metrics
from apache_beam.options import value_provider as vp
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.options.value_provider import ValueProvider
from apache_beam.options.value_provider import check_accessible
from apache_beam.runners.dataflow.native_io import iobase as dataflow_io
from apache_beam.transforms import DoFn
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.sideinputs import SIDE_INPUT_PREFIX
from apache_beam.transforms.sideinputs import get_sideinput_index
from apache_beam.transforms.util import ReshufflePerKey
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import retry
from apache_beam.utils.annotations import deprecated
from apache_beam.utils.annotations import experimental
try:
from apache_beam.io.gcp.internal.clients.bigquery import DatasetReference
from apache_beam.io.gcp.internal.clients.bigquery import TableReference
except ImportError:
DatasetReference = None
TableReference = None
__all__ = [
'TableRowJsonCoder',
'BigQueryDisposition',
'BigQuerySource',
'BigQuerySink',
'WriteToBigQuery',
'ReadFromBigQuery',
'ReadFromBigQueryRequest',
'ReadAllFromBigQuery',
'SCHEMA_AUTODETECT',
]
_LOGGER = logging.getLogger(__name__)
"""
Template for BigQuery jobs created by BigQueryIO. This template is:
`"beam_bq_job_{job_type}_{job_id}_{step_id}_{random}"`, where:
- `job_type` represents the BigQuery job type (e.g. extract / copy / load /
query).
- `job_id` is the Beam job name.
- `step_id` is a UUID representing the Dataflow step that created the
BQ job.
- `random` is a random string.
NOTE: This job name template does not have backwards compatibility guarantees.
"""
BQ_JOB_NAME_TEMPLATE = "beam_bq_job_{job_type}_{job_id}_{step_id}{random}"
@deprecated(since='2.11.0', current="bigquery_tools.parse_table_reference")
def _parse_table_reference(table, dataset=None, project=None):
return bigquery_tools.parse_table_reference(table, dataset, project)
@deprecated(
since='2.11.0', current="bigquery_tools.parse_table_schema_from_json")
def parse_table_schema_from_json(schema_string):
return bigquery_tools.parse_table_schema_from_json(schema_string)
@deprecated(since='2.11.0', current="bigquery_tools.default_encoder")
def default_encoder(obj):
return bigquery_tools.default_encoder(obj)
@deprecated(since='2.11.0', current="bigquery_tools.RowAsDictJsonCoder")
def RowAsDictJsonCoder(*args, **kwargs):
return bigquery_tools.RowAsDictJsonCoder(*args, **kwargs)
@deprecated(since='2.11.0', current="bigquery_tools.BigQueryReader")
def BigQueryReader(*args, **kwargs):
return bigquery_tools.BigQueryReader(*args, **kwargs)
@deprecated(since='2.11.0', current="bigquery_tools.BigQueryWriter")
def BigQueryWriter(*args, **kwargs):
return bigquery_tools.BigQueryWriter(*args, **kwargs)
@deprecated(since='2.11.0', current="bigquery_tools.BigQueryWrapper")
def BigQueryWrapper(*args, **kwargs):
return bigquery_tools.BigQueryWrapper(*args, **kwargs)
class TableRowJsonCoder(coders.Coder):
"""A coder for a TableRow instance to/from a JSON string.
Note that the encoding operation (used when writing to sinks) requires the
table schema in order to obtain the ordered list of field names. Reading from
sources on the other hand does not need the table schema.
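For example, a coder for rows with two fields could be created along these
lines (a sketch; the schema here is made up)::

  from apache_beam.io.gcp.internal.clients import bigquery

  schema = bigquery.TableSchema(fields=[
      bigquery.TableFieldSchema(name='name', type='STRING', mode='NULLABLE'),
      bigquery.TableFieldSchema(name='age', type='INTEGER', mode='NULLABLE')])
  coder = TableRowJsonCoder(table_schema=schema)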
"""
def __init__(self, table_schema=None):
# The table schema is needed for encoding TableRows as JSON (writing to
# sinks) because the ordered list of field names is used in the JSON
# representation.
self.table_schema = table_schema
# Precompute field names since we need them for row encoding.
if self.table_schema:
self.field_names = tuple(fs.name for fs in self.table_schema.fields)
self.field_types = tuple(fs.type for fs in self.table_schema.fields)
def encode(self, table_row):
if self.table_schema is None:
raise AttributeError(
'The TableRowJsonCoder requires a table schema for '
'encoding operations. Please specify a table_schema argument.')
try:
return json.dumps(
collections.OrderedDict(
zip(
self.field_names,
[from_json_value(f.v) for f in table_row.f])),
allow_nan=False,
default=bigquery_tools.default_encoder)
except ValueError as e:
raise ValueError('%s. %s' % (e, bigquery_tools.JSON_COMPLIANCE_ERROR))
def decode(self, encoded_table_row):
od = json.loads(
encoded_table_row, object_pairs_hook=collections.OrderedDict)
return bigquery.TableRow(
f=[bigquery.TableCell(v=to_json_value(e)) for e in od.values()])
class BigQueryDisposition(object):
"""Class holding standard strings used for create and write dispositions."""
CREATE_NEVER = 'CREATE_NEVER'
CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'
WRITE_TRUNCATE = 'WRITE_TRUNCATE'
WRITE_APPEND = 'WRITE_APPEND'
WRITE_EMPTY = 'WRITE_EMPTY'
@staticmethod
def validate_create(disposition):
values = (
BigQueryDisposition.CREATE_NEVER, BigQueryDisposition.CREATE_IF_NEEDED)
if disposition not in values:
raise ValueError(
'Invalid create disposition %s. Expecting %s' % (disposition, values))
return disposition
@staticmethod
def validate_write(disposition):
values = (
BigQueryDisposition.WRITE_TRUNCATE,
BigQueryDisposition.WRITE_APPEND,
BigQueryDisposition.WRITE_EMPTY)
if disposition not in values:
raise ValueError(
'Invalid write disposition %s. Expecting %s' % (disposition, values))
return disposition
# -----------------------------------------------------------------------------
# BigQuerySource, BigQuerySink.
@deprecated(since='2.25.0', current="ReadFromBigQuery")
def BigQuerySource(
table=None,
dataset=None,
project=None,
query=None,
validate=False,
coder=None,
use_standard_sql=False,
flatten_results=True,
kms_key=None,
use_dataflow_native_source=False):
if use_dataflow_native_source:
return _BigQuerySource(
table,
dataset,
project,
query,
validate,
coder,
use_standard_sql,
flatten_results,
kms_key)
else:
return ReadFromBigQuery(
table=table,
dataset=dataset,
project=project,
query=query,
validate=validate,
coder=coder,
use_standard_sql=use_standard_sql,
flatten_results=flatten_results,
use_json_exports=True,
kms_key=kms_key)
@deprecated(since='2.25.0', current="ReadFromBigQuery")
class _BigQuerySource(dataflow_io.NativeSource):
"""A source based on a BigQuery table."""
def __init__(
self,
table=None,
dataset=None,
project=None,
query=None,
validate=False,
coder=None,
use_standard_sql=False,
flatten_results=True,
kms_key=None,
temp_dataset=None):
"""Initialize a :class:`BigQuerySource`.
Args:
table (str): The ID of a BigQuery table. If specified all data of the
table will be used as input of the current source. The ID must contain
only letters ``a-z``, ``A-Z``, numbers ``0-9``, or underscores
``_``. If dataset and query arguments are :data:`None` then the table
argument must contain the entire table reference specified as:
``'DATASET.TABLE'`` or ``'PROJECT:DATASET.TABLE'``.
dataset (str): The ID of the dataset containing this table or
:data:`None` if the table reference is specified entirely by the table
argument or a query is specified.
project (str): The ID of the project containing this table or
:data:`None` if the table reference is specified entirely by the table
argument or a query is specified.
query (str): A query to be used instead of arguments table, dataset, and
project.
validate (bool): If :data:`True`, various checks will be done when source
gets initialized (e.g., is table present?). This should be
:data:`True` for most scenarios in order to catch errors as early as
possible (pipeline construction instead of pipeline execution). It
should be :data:`False` if the table is created during pipeline
execution by a previous step.
coder (~apache_beam.coders.coders.Coder): The coder for the table
rows if serialized to disk. If :data:`None`, then the default coder is
:class:`~apache_beam.io.gcp.bigquery_tools.RowAsDictJsonCoder`,
which will interpret every line in a file as a JSON serialized
dictionary. This argument needs a value only in special cases when
returning table rows as dictionaries is not desirable.
use_standard_sql (bool): Specifies whether to use BigQuery's standard SQL
dialect for this query. The default value is :data:`False`.
If set to :data:`True`, the query will use BigQuery's updated SQL
dialect with improved standards compliance.
This parameter is ignored for table inputs.
flatten_results (bool): Flattens all nested and repeated fields in the
query results. The default value is :data:`True`.
kms_key (str): Optional Cloud KMS key name for use when creating new
tables.
temp_dataset (``google.cloud.bigquery.dataset.DatasetReference``):
The dataset in which to create temporary tables when performing file
loads. By default, a new dataset is created in the execution project for
temporary tables.
Raises:
ValueError: if any of the following is true:
1) the table reference as a string does not match the expected format
2) neither a table nor a query is specified
3) both a table and a query are specified.
"""
# Import here to avoid adding the dependency for local running scenarios.
try:
# pylint: disable=wrong-import-order, wrong-import-position
from apitools.base import py # pylint: disable=unused-import
except ImportError:
raise ImportError(
'Google Cloud IO not available, '
'please install apache_beam[gcp]')
if table is not None and query is not None:
raise ValueError(
'Both a BigQuery table and a query were specified.'
' Please specify only one of these.')
elif table is None and query is None:
raise ValueError('A BigQuery table or a query must be specified')
elif table is not None:
self.table_reference = bigquery_tools.parse_table_reference(
table, dataset, project)
self.query = None
self.use_legacy_sql = True
else:
self.query = query
# TODO(BEAM-1082): Change the internal flag to be standard_sql
self.use_legacy_sql = not use_standard_sql
self.table_reference = None
self.validate = validate
self.flatten_results = flatten_results
self.coder = coder or bigquery_tools.RowAsDictJsonCoder()
self.kms_key = kms_key
self.temp_dataset = temp_dataset
def display_data(self):
if self.query is not None:
res = {'query': DisplayDataItem(self.query, label='Query')}
else:
if self.table_reference.projectId is not None:
tableSpec = '{}:{}.{}'.format(
self.table_reference.projectId,
self.table_reference.datasetId,
self.table_reference.tableId)
else:
tableSpec = '{}.{}'.format(
self.table_reference.datasetId, self.table_reference.tableId)
res = {'table': DisplayDataItem(tableSpec, label='Table')}
res['validation'] = DisplayDataItem(
self.validate, label='Validation Enabled')
return res
@property
def format(self):
"""Source format name required for remote execution."""
return 'bigquery'
def reader(self, test_bigquery_client=None):
return bigquery_tools.BigQueryReader(
source=self,
test_bigquery_client=test_bigquery_client,
use_legacy_sql=self.use_legacy_sql,
flatten_results=self.flatten_results,
kms_key=self.kms_key)
class _CustomBigQuerySource(BoundedSource):
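  """A ``BoundedSource`` for reading a BigQuery table or query result.

  The read is implemented by exporting the table (or the result of the query)
  to temporary files on GCS, in Avro or JSON format, and then reading those
  files back. This is the source backing the export-based read path of
  ``ReadFromBigQuery``.
  """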
def __init__(
self,
gcs_location=None,
table=None,
dataset=None,
project=None,
query=None,
validate=False,
pipeline_options=None,
coder=None,
use_standard_sql=False,
flatten_results=True,
kms_key=None,
bigquery_job_labels=None,
use_json_exports=False,
job_name=None,
step_name=None,
unique_id=None,
temp_dataset=None):
if table is not None and query is not None:
raise ValueError(
'Both a BigQuery table and a query were specified.'
' Please specify only one of these.')
elif table is None and query is None:
raise ValueError('A BigQuery table or a query must be specified')
elif table is not None:
self.table_reference = bigquery_tools.parse_table_reference(
table, dataset, project)
self.query = None
self.use_legacy_sql = True
else:
if isinstance(query, str):
query = StaticValueProvider(str, query)
self.query = query
# TODO(BEAM-1082): Change the internal flag to be standard_sql
self.use_legacy_sql = not use_standard_sql
self.table_reference = None
self.gcs_location = gcs_location
self.project = project
self.validate = validate
self.flatten_results = flatten_results
self.coder = coder or _JsonToDictCoder
self.kms_key = kms_key
self.split_result = None
self.options = pipeline_options
self.bq_io_metadata = None # Populate in setup, as it may make an RPC
self.bigquery_job_labels = bigquery_job_labels or {}
self.use_json_exports = use_json_exports
self.temp_dataset = temp_dataset
self._job_name = job_name or 'BQ_EXPORT_JOB'
self._step_name = step_name
self._source_uuid = unique_id
def _get_bq_metadata(self):
if not self.bq_io_metadata:
self.bq_io_metadata = create_bigquery_io_metadata(self._step_name)
return self.bq_io_metadata
def display_data(self):
export_format = 'JSON' if self.use_json_exports else 'AVRO'
return {
'table': str(self.table_reference),
'query': str(self.query),
'project': str(self.project),
'use_legacy_sql': self.use_legacy_sql,
'bigquery_job_labels': json.dumps(self.bigquery_job_labels),
'export_file_format': export_format,
'launchesBigQueryJobs': DisplayDataItem(
True, label="This Dataflow job launches bigquery jobs."),
}
def estimate_size(self):
bq = bigquery_tools.BigQueryWrapper()
if self.table_reference is not None:
table_ref = self.table_reference
if (isinstance(self.table_reference, vp.ValueProvider) and
self.table_reference.is_accessible()):
table_ref = bigquery_tools.parse_table_reference(
self.table_reference.get(), project=self._get_project())
elif isinstance(self.table_reference, vp.ValueProvider):
# Size estimation is best effort. We return None as we have
# no access to the table that we're querying.
return None
if not table_ref.projectId:
table_ref.projectId = self._get_project()
table = bq.get_table(
table_ref.projectId, table_ref.datasetId, table_ref.tableId)
return int(table.numBytes)
elif self.query is not None and self.query.is_accessible():
project = self._get_project()
query_job_name = bigquery_tools.generate_bq_job_name(
self._job_name,
self._source_uuid,
bigquery_tools.BigQueryJobTypes.QUERY,
'%s_%s' % (int(time.time()), random.randint(0, 1000)))
job = bq._start_query_job(
project,
self.query.get(),
self.use_legacy_sql,
self.flatten_results,
job_id=query_job_name,
dry_run=True,
kms_key=self.kms_key,
job_labels=self._get_bq_metadata().add_additional_bq_job_labels(
self.bigquery_job_labels))
size = int(job.statistics.totalBytesProcessed)
return size
else:
# Size estimation is best effort. We return None as we have
# no access to the query that we're running.
return None
def _get_project(self):
"""Returns the project that queries and exports will be billed to."""
project = self.options.view_as(GoogleCloudOptions).project
if isinstance(project, vp.ValueProvider):
project = project.get()
if self.temp_dataset:
return self.temp_dataset.projectId
if not project:
project = self.project
return project
def _create_source(self, path, schema):
if not self.use_json_exports:
return create_avro_source(path, use_fastavro=True)
else:
return TextSource(
path,
min_bundle_size=0,
compression_type=CompressionTypes.UNCOMPRESSED,
strip_trailing_newlines=True,
coder=self.coder(schema))
def split(self, desired_bundle_size, start_position=None, stop_position=None):
if self.split_result is None:
bq = bigquery_tools.BigQueryWrapper(
temp_dataset_id=(
self.temp_dataset.datasetId if self.temp_dataset else None))
if self.query is not None:
self._setup_temporary_dataset(bq)
self.table_reference = self._execute_query(bq)
if not self.table_reference.projectId:
self.table_reference.projectId = self._get_project()
schema, metadata_list = self._export_files(bq)
self.split_result = [
self._create_source(metadata.path, schema)
for metadata in metadata_list
]
if self.query is not None:
bq.clean_up_temporary_dataset(self._get_project())
for source in self.split_result:
yield SourceBundle(1.0, source, None, None)
def get_range_tracker(self, start_position, stop_position):
class CustomBigQuerySourceRangeTracker(RangeTracker):
"""A RangeTracker that always returns positions as None."""
def start_position(self):
return None
def stop_position(self):
return None
return CustomBigQuerySourceRangeTracker()
def read(self, range_tracker):
raise NotImplementedError('BigQuery source must be split before being read')
@check_accessible(['query'])
def _setup_temporary_dataset(self, bq):
location = bq.get_query_location(
self._get_project(), self.query.get(), self.use_legacy_sql)
bq.create_temporary_dataset(self._get_project(), location)
@check_accessible(['query'])
def _execute_query(self, bq):
query_job_name = bigquery_tools.generate_bq_job_name(
self._job_name,
self._source_uuid,
bigquery_tools.BigQueryJobTypes.QUERY,
'%s_%s' % (int(time.time()), random.randint(0, 1000)))
job = bq._start_query_job(
self._get_project(),
self.query.get(),
self.use_legacy_sql,
self.flatten_results,
job_id=query_job_name,
kms_key=self.kms_key,
job_labels=self._get_bq_metadata().add_additional_bq_job_labels(
self.bigquery_job_labels))
job_ref = job.jobReference
bq.wait_for_bq_job(job_ref, max_retries=0)
return bq._get_temp_table(self._get_project())
def _export_files(self, bq):
"""Runs a BigQuery export job.
Returns:
A tuple of a bigquery.TableSchema instance and a list of FileMetadata
instances.
"""
job_labels = self._get_bq_metadata().add_additional_bq_job_labels(
self.bigquery_job_labels)
export_job_name = bigquery_tools.generate_bq_job_name(
self._job_name,
self._source_uuid,
bigquery_tools.BigQueryJobTypes.EXPORT,
'%s_%s' % (int(time.time()), random.randint(0, 1000)))
temp_location = self.options.view_as(GoogleCloudOptions).temp_location
gcs_location = bigquery_export_destination_uri(
self.gcs_location, temp_location, self._source_uuid)
if self.use_json_exports:
job_ref = bq.perform_extract_job([gcs_location],
export_job_name,
self.table_reference,
bigquery_tools.FileFormat.JSON,
project=self._get_project(),
job_labels=job_labels,
include_header=False)
else:
job_ref = bq.perform_extract_job([gcs_location],
export_job_name,
self.table_reference,
bigquery_tools.FileFormat.AVRO,
project=self._get_project(),
include_header=False,
job_labels=job_labels,
use_avro_logical_types=True)
bq.wait_for_bq_job(job_ref)
metadata_list = FileSystems.match([gcs_location])[0].metadata_list
if isinstance(self.table_reference, vp.ValueProvider):
table_ref = bigquery_tools.parse_table_reference(
self.table_reference.get(), project=self.project)
else:
table_ref = self.table_reference
table = bq.get_table(
table_ref.projectId, table_ref.datasetId, table_ref.tableId)
return table.schema, metadata_list
@deprecated(since='2.11.0', current="WriteToBigQuery")
class BigQuerySink(dataflow_io.NativeSink):
"""A sink based on a BigQuery table.
This BigQuery sink triggers a Dataflow native sink for BigQuery
that only supports batch pipelines.
Instead of using this sink directly, please use the WriteToBigQuery
transform, which works for both batch and streaming pipelines.
"""
def __init__(
self,
table,
dataset=None,
project=None,
schema=None,
create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=BigQueryDisposition.WRITE_EMPTY,
validate=False,
coder=None,
kms_key=None):
"""Initialize a BigQuerySink.
Args:
table (str): The ID of the table. The ID must contain only letters
``a-z``, ``A-Z``, numbers ``0-9``, or underscores ``_``. If
**dataset** argument is :data:`None` then the table argument must
contain the entire table reference specified as: ``'DATASET.TABLE'`` or
``'PROJECT:DATASET.TABLE'``.
dataset (str): The ID of the dataset containing this table or
:data:`None` if the table reference is specified entirely by the table
argument.
project (str): The ID of the project containing this table or
:data:`None` if the table reference is specified entirely by the table
argument.
schema (str): The schema to be used if the BigQuery table to write has
to be created. This can be either specified as a
:class:`~apache_beam.io.gcp.internal.clients.bigquery.\
bigquery_v2_messages.TableSchema` object or a single string of the form
``'field1:type1,field2:type2,field3:type3'`` that defines a comma
separated list of fields. Here ``'type'`` should specify the BigQuery
type of the field. Single string based schemas do not support nested
fields, repeated fields, or specifying a BigQuery mode for fields (mode
will always be set to ``'NULLABLE'``).
create_disposition (BigQueryDisposition): A string describing what
happens if the table does not exist. Possible values are:
* :attr:`BigQueryDisposition.CREATE_IF_NEEDED`: create if does not
exist.
* :attr:`BigQueryDisposition.CREATE_NEVER`: fail the write if does not
exist.
write_disposition (BigQueryDisposition): A string describing what
happens if the table already has some data. Possible values are:
* :attr:`BigQueryDisposition.WRITE_TRUNCATE`: delete existing rows.
* :attr:`BigQueryDisposition.WRITE_APPEND`: add to existing rows.
* :attr:`BigQueryDisposition.WRITE_EMPTY`: fail the write if table not
empty.
validate (bool): If :data:`True`, various checks will be done when sink
gets initialized (e.g., is table present given the disposition
arguments?). This should be :data:`True` for most scenarios in order to
catch errors as early as possible (pipeline construction instead of
pipeline execution). It should be :data:`False` if the table is created
during pipeline execution by a previous step.
coder (~apache_beam.coders.coders.Coder): The coder for the
table rows if serialized to disk. If :data:`None`, then the default
coder is :class:`~apache_beam.io.gcp.bigquery_tools.RowAsDictJsonCoder`,
which will interpret every element written to the sink as a dictionary
that will be JSON serialized as a line in a file. This argument needs a
value only in special cases when writing table rows as dictionaries is
not desirable.
kms_key (str): Optional Cloud KMS key name for use when creating new
tables.
Raises:
TypeError: if the schema argument is not a :class:`str` or a
:class:`~apache_beam.io.gcp.internal.clients.bigquery.\
bigquery_v2_messages.TableSchema` object.
ValueError: if the table reference as a string does not
match the expected format.
"""
# Import here to avoid adding the dependency for local running scenarios.
try:
# pylint: disable=wrong-import-order, wrong-import-position
from apitools.base import py # pylint: disable=unused-import
except ImportError:
raise ImportError(
'Google Cloud IO not available, '
'please install apache_beam[gcp]')
self.table_reference = bigquery_tools.parse_table_reference(
table, dataset, project)
# Transform the table schema into a bigquery.TableSchema instance.
if isinstance(schema, str):
# TODO(silviuc): Should add a regex-based validation of the format.
table_schema = bigquery.TableSchema()
schema_list = [s.strip(' ') for s in schema.split(',')]
for field_and_type in schema_list:
field_name, field_type = field_and_type.split(':')
field_schema = bigquery.TableFieldSchema()
field_schema.name = field_name
field_schema.type = field_type
field_schema.mode = 'NULLABLE'
table_schema.fields.append(field_schema)
self.table_schema = table_schema
elif schema is None:
# TODO(silviuc): Should check that table exists if no schema specified.
self.table_schema = schema
elif isinstance(schema, bigquery.TableSchema):
self.table_schema = schema
else:
raise TypeError('Unexpected schema argument: %s.' % schema)
self.create_disposition = BigQueryDisposition.validate_create(
create_disposition)
self.write_disposition = BigQueryDisposition.validate_write(
write_disposition)
self.validate = validate
self.coder = coder or bigquery_tools.RowAsDictJsonCoder()
self.kms_key = kms_key
def display_data(self):
res = {}
if self.table_reference is not None:
tableSpec = '{}.{}'.format(
self.table_reference.datasetId, self.table_reference.tableId)
if self.table_reference.projectId is not None:
tableSpec = '{}:{}'.format(self.table_reference.projectId, tableSpec)
res['table'] = DisplayDataItem(tableSpec, label='Table')
res['validation'] = DisplayDataItem(
self.validate, label="Validation Enabled")
return res
def schema_as_json(self):
"""Returns the TableSchema associated with the sink as a JSON string."""
def schema_list_as_object(schema_list):
"""Returns a list of TableFieldSchema objects as a list of dicts."""
fields = []
for f in schema_list:
fs = {'name': f.name, 'type': f.type}
if f.description is not None:
fs['description'] = f.description
if f.mode is not None:
fs['mode'] = f.mode
if f.type.lower() == 'record':
fs['fields'] = schema_list_as_object(f.fields)
fields.append(fs)
return fields
return json.dumps(
{'fields': schema_list_as_object(self.table_schema.fields)})
@property
def format(self):
"""Sink format name required for remote execution."""
return 'bigquery'
def writer(self, test_bigquery_client=None, buffer_size=None):
return bigquery_tools.BigQueryWriter(
sink=self,
test_bigquery_client=test_bigquery_client,
buffer_size=buffer_size)
_KNOWN_TABLES = set()
class BigQueryWriteFn(DoFn):
"""A ``DoFn`` that streams writes to BigQuery once the table is created."""
DEFAULT_MAX_BUFFERED_ROWS = 2000
DEFAULT_MAX_BATCH_SIZE = 500
FAILED_ROWS = 'FailedRows'
STREAMING_API_LOGGING_FREQUENCY_SEC = 300
def __init__(
self,
batch_size,
schema=None,
create_disposition=None,
write_disposition=None,
kms_key=None,
test_client=None,
max_buffered_rows=None,
retry_strategy=None,
additional_bq_parameters=None,
ignore_insert_ids=False,
with_batched_input=False):
"""Initialize a WriteToBigQuery transform.
Args:
batch_size: Number of rows to be written to BQ per streaming API insert.
schema: The schema to be used if the BigQuery table to write has to be
created. This can be either specified as a 'bigquery.TableSchema' object
or a single string of the form 'field1:type1,field2:type2,field3:type3'
that defines a comma separated list of fields. Here 'type' should
specify the BigQuery type of the field. Single string based schemas do
not support nested fields, repeated fields, or specifying a BigQuery
mode for fields (mode will always be set to 'NULLABLE').
create_disposition: A string describing what happens if the table does not
exist. Possible values are:
- BigQueryDisposition.CREATE_IF_NEEDED: create if does not exist.
- BigQueryDisposition.CREATE_NEVER: fail the write if does not exist.
write_disposition: A string describing what happens if the table
already has some data. Possible values are:
- BigQueryDisposition.WRITE_TRUNCATE: delete existing rows.
- BigQueryDisposition.WRITE_APPEND: add to existing rows.
- BigQueryDisposition.WRITE_EMPTY: fail the write if table not empty.
For streaming pipelines, WRITE_TRUNCATE cannot be used.
kms_key: Optional Cloud KMS key name for use when creating new tables.
test_client: Override the default bigquery client used for testing.
max_buffered_rows: The maximum number of rows that are allowed to stay
buffered when running dynamic destinations. When destinations are
dynamic, it is important to keep caches small even when a single
batch has not been completely filled up.
retry_strategy: The strategy to use when retrying streaming inserts
into BigQuery. Options are shown in bigquery_tools.RetryStrategy attrs.
additional_bq_parameters (dict, callable): A set of additional parameters
to be passed when creating a BigQuery table. These are passed when
triggering a load job for FILE_LOADS, and when creating a new table for
STREAMING_INSERTS.
ignore_insert_ids: When using the STREAMING_INSERTS method to write data
to BigQuery, `insert_ids` are a BigQuery feature that supports
deduplication of events. If your use case is not sensitive to
duplication of data inserted to BigQuery, set `ignore_insert_ids`
to True to increase the throughput for BQ writing. See:
https://cloud.google.com/bigquery/streaming-data-into-bigquery#disabling_best_effort_de-duplication
with_batched_input: Whether the input has already been batched per
destination. If not, perform best-effort batching per destination within
a bundle.
"""
self.schema = schema
self.test_client = test_client
self.create_disposition = create_disposition
self.write_disposition = write_disposition
if write_disposition in (BigQueryDisposition.WRITE_EMPTY,
BigQueryDisposition.WRITE_TRUNCATE):
raise ValueError(
'Write disposition %s is not supported for'
' streaming inserts to BigQuery' % write_disposition)
self._rows_buffer = []
self._reset_rows_buffer()
self._total_buffered_rows = 0
self.kms_key = kms_key
self._max_batch_size = batch_size or BigQueryWriteFn.DEFAULT_MAX_BATCH_SIZE
self._max_buffered_rows = (
max_buffered_rows or BigQueryWriteFn.DEFAULT_MAX_BUFFERED_ROWS)
self._retry_strategy = retry_strategy or RetryStrategy.RETRY_ALWAYS
self.ignore_insert_ids = ignore_insert_ids
self.with_batched_input = with_batched_input
self.additional_bq_parameters = additional_bq_parameters or {}
# accumulate the total time spent in exponential backoff
self._throttled_secs = Metrics.counter(
BigQueryWriteFn, "cumulativeThrottlingSeconds")
self.batch_size_metric = Metrics.distribution(self.__class__, "batch_size")
self.batch_latency_metric = Metrics.distribution(
self.__class__, "batch_latency_ms")
self.failed_rows_metric = Metrics.distribution(
self.__class__, "rows_failed_per_batch")
self.bigquery_wrapper = None
self.streaming_api_logging_frequency_sec = (
BigQueryWriteFn.STREAMING_API_LOGGING_FREQUENCY_SEC)
def display_data(self):
return {
'max_batch_size': self._max_batch_size,
'max_buffered_rows': self._max_buffered_rows,
'retry_strategy': self._retry_strategy,
'create_disposition': str(self.create_disposition),
'write_disposition': str(self.write_disposition),
'additional_bq_parameters': str(self.additional_bq_parameters),
'ignore_insert_ids': str(self.ignore_insert_ids)
}
def _reset_rows_buffer(self):
self._rows_buffer = collections.defaultdict(lambda: [])
@staticmethod
def get_table_schema(schema):
"""Transform the table schema into a bigquery.TableSchema instance.
Args:
schema: The schema to be used if the BigQuery table to write has to be
created. This is a dictionary object created in the WriteToBigQuery
transform.
Returns:
table_schema: The schema to be used if the BigQuery table to write has
to be created but in the bigquery.TableSchema format.
"""
if schema is None:
return schema
elif isinstance(schema, str):
return bigquery_tools.parse_table_schema_from_json(schema)
elif isinstance(schema, dict):
return bigquery_tools.parse_table_schema_from_json(json.dumps(schema))
else:
raise TypeError('Unexpected schema argument: %s.' % schema)
def start_bundle(self):
self._reset_rows_buffer()
if not self.bigquery_wrapper:
self.bigquery_wrapper = bigquery_tools.BigQueryWrapper(
client=self.test_client)
(
bigquery_tools.BigQueryWrapper.HISTOGRAM_METRIC_LOGGER.
minimum_logging_frequency_msec
) = self.streaming_api_logging_frequency_sec * 1000
self._backoff_calculator = iter(
retry.FuzzedExponentialIntervals(
initial_delay_secs=0.2, num_retries=10000, max_delay_secs=1500))
def _create_table_if_needed(self, table_reference, schema=None):
str_table_reference = '%s:%s.%s' % (
table_reference.projectId,
table_reference.datasetId,
table_reference.tableId)
if str_table_reference in _KNOWN_TABLES:
return
if self.create_disposition == BigQueryDisposition.CREATE_NEVER:
# If we never want to create the table, we assume it already exists,
# and avoid the get-or-create step.
return
_LOGGER.debug(
'Creating or getting table %s with schema %s.', table_reference, schema)
table_schema = self.get_table_schema(schema)
if table_reference.projectId is None:
table_reference.projectId = vp.RuntimeValueProvider.get_value(
'project', str, '')
self.bigquery_wrapper.get_or_create_table(
table_reference.projectId,
table_reference.datasetId,
table_reference.tableId,
table_schema,
self.create_disposition,
self.write_disposition,
additional_create_parameters=self.additional_bq_parameters)
_KNOWN_TABLES.add(str_table_reference)
def process(self, element, *schema_side_inputs):
destination = bigquery_tools.get_hashable_destination(element[0])
if callable(self.schema):
schema = self.schema(destination, *schema_side_inputs)
elif isinstance(self.schema, vp.ValueProvider):
schema = self.schema.get()
else:
schema = self.schema
self._create_table_if_needed(
bigquery_tools.parse_table_reference(destination), schema)
if not self.with_batched_input:
row_and_insert_id = element[1]
self._rows_buffer[destination].append(row_and_insert_id)
self._total_buffered_rows += 1
if len(self._rows_buffer[destination]) >= self._max_batch_size:
return self._flush_batch(destination)
elif self._total_buffered_rows >= self._max_buffered_rows:
return self._flush_all_batches()
else:
# The input is already batched per destination, flush the rows now.
batched_rows = element[1]
self._rows_buffer[destination].extend(batched_rows)
return self._flush_batch(destination)
def finish_bundle(self):
bigquery_tools.BigQueryWrapper.HISTOGRAM_METRIC_LOGGER.log_metrics(
reset_after_logging=True)
return self._flush_all_batches()
def _flush_all_batches(self):
_LOGGER.debug(
'Attempting to flush to all destinations. Total buffered: %s',
self._total_buffered_rows)
return itertools.chain(
*[
self._flush_batch(destination)
for destination in list(self._rows_buffer.keys())
if self._rows_buffer[destination]
])
def _flush_batch(self, destination):
# Flush the current batch of rows to BigQuery.
rows_and_insert_ids = self._rows_buffer[destination]
table_reference = bigquery_tools.parse_table_reference(destination)
if table_reference.projectId is None:
table_reference.projectId = vp.RuntimeValueProvider.get_value(
'project', str, '')
_LOGGER.debug(
'Flushing data to %s. Total %s rows.',
destination,
len(rows_and_insert_ids))
self.batch_size_metric.update(len(rows_and_insert_ids))
rows = [r[0] for r in rows_and_insert_ids]
if self.ignore_insert_ids:
insert_ids = [None for r in rows_and_insert_ids]
else:
insert_ids = [r[1] for r in rows_and_insert_ids]
while True:
start = time.time()
passed, errors = self.bigquery_wrapper.insert_rows(
project_id=table_reference.projectId,
dataset_id=table_reference.datasetId,
table_id=table_reference.tableId,
rows=rows,
insert_ids=insert_ids,
skip_invalid_rows=True)
self.batch_latency_metric.update((time.time() - start) * 1000)
failed_rows = [rows[entry['index']] for entry in errors]
should_retry = any(
RetryStrategy.should_retry(
self._retry_strategy, entry['errors'][0]['reason'])
for entry in errors)
if not passed:
self.failed_rows_metric.update(len(failed_rows))
message = (
'There were errors inserting to BigQuery. Will{} retry. '
'Errors were {}'.format(("" if should_retry else " not"), errors))
if should_retry:
_LOGGER.warning(message)
else:
_LOGGER.error(message)
rows = failed_rows
if not should_retry:
break
else:
retry_backoff = next(self._backoff_calculator)
_LOGGER.info(
'Sleeping %s seconds before retrying insertion.', retry_backoff)
time.sleep(retry_backoff)
self._throttled_secs.inc(retry_backoff)
self._total_buffered_rows -= len(self._rows_buffer[destination])
del self._rows_buffer[destination]
return [
pvalue.TaggedOutput(
BigQueryWriteFn.FAILED_ROWS,
GlobalWindows.windowed_value((destination, row)))
for row in failed_rows
]
# The number of shards per destination when writing via streaming inserts.
DEFAULT_SHARDS_PER_DESTINATION = 500
# The max duration a batch of elements is allowed to be buffered before being
# flushed to BigQuery.
DEFAULT_BATCH_BUFFERING_DURATION_LIMIT_SEC = 0.2
class _StreamToBigQuery(PTransform):
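  """Writes a PCollection to BigQuery using the streaming inserts API.

  Elements are tagged with their destination table, given unique insert ids,
  sharded per destination (either with a fixed number of shards or, when
  ``with_auto_sharding`` is set, via ``GroupIntoBatches.WithShardedKey``), and
  then streamed to BigQuery by ``BigQueryWriteFn``.
  """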
def __init__(
self,
table_reference,
table_side_inputs,
schema_side_inputs,
schema,
batch_size,
create_disposition,
write_disposition,
kms_key,
retry_strategy,
additional_bq_parameters,
ignore_insert_ids,
with_auto_sharding,
test_client=None):
self.table_reference = table_reference
self.table_side_inputs = table_side_inputs
self.schema_side_inputs = schema_side_inputs
self.schema = schema
self.batch_size = batch_size
self.create_disposition = create_disposition
self.write_disposition = write_disposition
self.kms_key = kms_key
self.retry_strategy = retry_strategy
self.test_client = test_client
self.additional_bq_parameters = additional_bq_parameters
self.ignore_insert_ids = ignore_insert_ids
self.with_auto_sharding = with_auto_sharding
class InsertIdPrefixFn(DoFn):
def start_bundle(self):
self.prefix = str(uuid.uuid4())
self._row_count = 0
def process(self, element):
key = element[0]
value = element[1]
insert_id = '%s-%s' % (self.prefix, self._row_count)
self._row_count += 1
yield (key, (value, insert_id))
def expand(self, input):
bigquery_write_fn = BigQueryWriteFn(
schema=self.schema,
batch_size=self.batch_size,
create_disposition=self.create_disposition,
write_disposition=self.write_disposition,
kms_key=self.kms_key,
retry_strategy=self.retry_strategy,
test_client=self.test_client,
additional_bq_parameters=self.additional_bq_parameters,
ignore_insert_ids=self.ignore_insert_ids,
with_batched_input=self.with_auto_sharding)
def _add_random_shard(element):
key = element[0]
value = element[1]
return ((key, random.randint(0, DEFAULT_SHARDS_PER_DESTINATION)), value)
def _restore_table_ref(sharded_table_ref_elems_kv):
sharded_table_ref = sharded_table_ref_elems_kv[0]
table_ref = bigquery_tools.parse_table_reference(sharded_table_ref)
return (table_ref, sharded_table_ref_elems_kv[1])
tagged_data = (
input
| 'AppendDestination' >> beam.ParDo(
bigquery_tools.AppendDestinationsFn(self.table_reference),
*self.table_side_inputs)
| 'AddInsertIds' >> beam.ParDo(_StreamToBigQuery.InsertIdPrefixFn())
|
'ToHashableTableRef' >> beam.Map(bigquery_tools.to_hashable_table_ref))
if not self.with_auto_sharding:
tagged_data = (
tagged_data
| 'WithFixedSharding' >> beam.Map(_add_random_shard)
| 'CommitInsertIds' >> ReshufflePerKey()
| 'DropShard' >> beam.Map(lambda kv: (kv[0][0], kv[1])))
else:
# Auto-sharding is achieved via GroupIntoBatches.WithShardedKey
# transform which shards, groups and at the same time batches the table
# rows to be inserted to BigQuery.
# Firstly the keys of tagged_data (table references) are converted to a
# hashable format. This is needed to work with the keyed states used by
# GroupIntoBatches. After grouping and batching is done, original table
# references are restored.
tagged_data = (
tagged_data
| 'WithAutoSharding' >> beam.GroupIntoBatches.WithShardedKey(
(self.batch_size or BigQueryWriteFn.DEFAULT_MAX_BUFFERED_ROWS),
DEFAULT_BATCH_BUFFERING_DURATION_LIMIT_SEC)
| 'DropShard' >> beam.Map(lambda kv: (kv[0].key, kv[1])))
return (
tagged_data
| 'FromHashableTableRef' >> beam.Map(_restore_table_ref)
| 'StreamInsertRows' >> ParDo(
bigquery_write_fn, *self.schema_side_inputs).with_outputs(
BigQueryWriteFn.FAILED_ROWS, main='main'))
# Flag to be passed to WriteToBigQuery to force schema autodetection
SCHEMA_AUTODETECT = 'SCHEMA_AUTODETECT'
class WriteToBigQuery(PTransform):
"""Write data to BigQuery.
This transform receives a PCollection of elements to be inserted into BigQuery
tables. The elements may be Python dictionaries or `TableRow` instances.
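A minimal usage sketch (the table and field names here are hypothetical)
could look like this::

  quotes = p | beam.Create(
      [{'source': 'internet', 'quote': 'To be or not to be.'}])
  quotes | beam.io.WriteToBigQuery(
      'my_project:my_dataset.my_table',
      schema='source:STRING,quote:STRING',
      create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
      write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)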
"""
class Method(object):
DEFAULT = 'DEFAULT'
STREAMING_INSERTS = 'STREAMING_INSERTS'
FILE_LOADS = 'FILE_LOADS'
def __init__(
self,
table,
dataset=None,
project=None,
schema=None,
create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=BigQueryDisposition.WRITE_APPEND,
kms_key=None,
batch_size=None,
max_file_size=None,
max_files_per_bundle=None,
test_client=None,
custom_gcs_temp_location=None,
method=None,
insert_retry_strategy=None,
additional_bq_parameters=None,
table_side_inputs=None,
schema_side_inputs=None,
triggering_frequency=None,
validate=True,
temp_file_format=None,
ignore_insert_ids=False,
# TODO(BEAM-11857): Switch the default when the feature is mature.
with_auto_sharding=False):
"""Initialize a WriteToBigQuery transform.
Args:
table (str, callable, ValueProvider): The ID of the table, or a callable
that returns it. The ID must contain only letters ``a-z``, ``A-Z``,
numbers ``0-9``, or connectors ``-_``. If dataset argument is
:data:`None` then the table argument must contain the entire table
reference specified as: ``'DATASET.TABLE'``
or ``'PROJECT:DATASET.TABLE'``. If it's a callable, it must receive one
argument representing an element to be written to BigQuery, and return
a TableReference, or a string table name as specified above.
dataset (str): The ID of the dataset containing this table or
:data:`None` if the table reference is specified entirely by the table
argument.
project (str): The ID of the project containing this table or
:data:`None` if the table reference is specified entirely by the table
argument.
schema (str,dict,ValueProvider,callable): The schema to be used if the
BigQuery table to write has to be created. This can be specified
as a :class:`~apache_beam.io.gcp.internal.clients.bigquery.\
bigquery_v2_messages.TableSchema` object, a Python dictionary describing the
schema, a JSON string of such a dictionary, a `ValueProvider` holding any of
those, or a single string of the form
``'field1:type1,field2:type2,field3:type3'`` that defines a comma
separated list of fields. Here ``'type'`` should specify the BigQuery
type of the field. Single string based schemas do not support nested
fields, repeated fields, or specifying a BigQuery mode for fields
(mode will always be set to ``'NULLABLE'``).
If a callable, then it should receive a destination (in the form of
a str, and return a str, dict or TableSchema).
One may also pass ``SCHEMA_AUTODETECT`` here when using JSON-based
file loads, and BigQuery will try to infer the schema for the files
that are being loaded.
create_disposition (BigQueryDisposition): A string describing what
happens if the table does not exist. Possible values are:
* :attr:`BigQueryDisposition.CREATE_IF_NEEDED`: create if does not
exist.
* :attr:`BigQueryDisposition.CREATE_NEVER`: fail the write if does not
exist.
write_disposition (BigQueryDisposition): A string describing what happens
if the table already has some data. Possible values are:
* :attr:`BigQueryDisposition.WRITE_TRUNCATE`: delete existing rows.
* :attr:`BigQueryDisposition.WRITE_APPEND`: add to existing rows.
* :attr:`BigQueryDisposition.WRITE_EMPTY`: fail the write if table not
empty.
For streaming pipelines, WRITE_TRUNCATE cannot be used.
kms_key (str): Optional Cloud KMS key name for use when creating new
tables.
batch_size (int): Number of rows to be written to BQ per streaming API
insert. The default is 500.
test_client: Override the default bigquery client used for testing.
max_file_size (int): The maximum size for a file to be written and then
loaded into BigQuery. The default value is 4TB, which is 80% of the
limit of 5TB for BigQuery to load any file.
max_files_per_bundle (int): The maximum number of files to be concurrently
written by a worker. The default here is 20. Larger values will allow
writing to multiple destinations without having to reshard - but they
increase the memory burden on the workers.
custom_gcs_temp_location (str): A GCS location to store files to be used
for file loads into BigQuery. By default, this will use the pipeline's
temp_location, but for pipelines whose temp_location is not appropriate
for BQ File Loads, users should pass a specific one.
method: The method to use to write to BigQuery. It may be
STREAMING_INSERTS, FILE_LOADS, or DEFAULT. An introduction on loading
data to BigQuery: https://cloud.google.com/bigquery/docs/loading-data.
DEFAULT will use STREAMING_INSERTS on Streaming pipelines and
FILE_LOADS on Batch pipelines.
insert_retry_strategy: The strategy to use when retrying streaming inserts
into BigQuery. Options are shown in bigquery_tools.RetryStrategy attrs.
Default is to retry always. This means that whenever there are rows
that fail to be inserted to BigQuery, they will be retried indefinitely.
Other retry strategy settings will produce a deadletter PCollection
as output. Appropriate values are:
* `RetryStrategy.RETRY_ALWAYS`: retry all rows if
there are any kind of errors. Note that this will hold your pipeline
back if there are errors until you cancel or update it.
* `RetryStrategy.RETRY_NEVER`: rows with errors
will not be retried. Instead they will be output to a dead letter
queue under the `'FailedRows'` tag.
* `RetryStrategy.RETRY_ON_TRANSIENT_ERROR`: retry
rows with transient errors (e.g. timeouts). Rows with permanent errors
will be output to dead letter queue under `'FailedRows'` tag.
additional_bq_parameters (callable): A function that returns a dictionary
with additional parameters to pass to BQ when creating / loading data
into a table. These can be 'timePartitioning', 'clustering', etc. They
are passed directly to the job load configuration. See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload
table_side_inputs (tuple): A tuple with ``AsSideInput`` PCollections to be
passed to the table callable (if one is provided).
schema_side_inputs: A tuple with ``AsSideInput`` PCollections to be
passed to the schema callable (if one is provided).
triggering_frequency (int): Every triggering_frequency duration, a
BigQuery load job will be triggered for all the data written since
the last load job. BigQuery has limits on how many load jobs can be
triggered per day, so be careful not to set this duration too low, or
you may exceed daily quota. Often this is set to 5 or 10 minutes to
ensure that the project stays well under the BigQuery quota.
See https://cloud.google.com/bigquery/quota-policy for more information
about BigQuery quotas.
validate: Indicates whether to perform validation checks on
inputs. This parameter is primarily used for testing.
temp_file_format: The format to use for file loads into BigQuery. The
options are NEWLINE_DELIMITED_JSON or AVRO, with NEWLINE_DELIMITED_JSON
being used by default. For advantages and limitations of the two
formats, see
https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-avro
and
https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-json.
ignore_insert_ids: When using the STREAMING_INSERTS method to write data
to BigQuery, `insert_ids` are a BigQuery feature that supports
deduplication of events. If your use case is not sensitive to
duplication of data inserted to BigQuery, set `ignore_insert_ids`
to True to increase the throughput for BQ writing. See:
https://cloud.google.com/bigquery/streaming-data-into-bigquery#disabling_best_effort_de-duplication
with_auto_sharding: Experimental. If true, enables using a dynamically
determined number of shards to write to BigQuery. This can be used for
both FILE_LOADS and STREAMING_INSERTS. Only applicable to unbounded
input.
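As an illustrative sketch (values here are only examples), file loads on a
streaming pipeline could be configured like this::

  quotes | beam.io.WriteToBigQuery(
      'my_project:my_dataset.my_table',
      schema='source:STRING,quote:STRING',
      method=beam.io.WriteToBigQuery.Method.FILE_LOADS,
      triggering_frequency=300)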
"""
self._table = table
self._dataset = dataset
self._project = project
self.table_reference = bigquery_tools.parse_table_reference(
table, dataset, project)
self.create_disposition = BigQueryDisposition.validate_create(
create_disposition)
self.write_disposition = BigQueryDisposition.validate_write(
write_disposition)
if schema == SCHEMA_AUTODETECT:
self.schema = schema
else:
self.schema = bigquery_tools.get_dict_table_schema(schema)
self.batch_size = batch_size
self.kms_key = kms_key
self.test_client = test_client
# TODO(pabloem): Consider handling ValueProvider for this location.
self.custom_gcs_temp_location = custom_gcs_temp_location
self.max_file_size = max_file_size
self.max_files_per_bundle = max_files_per_bundle
self.method = method or WriteToBigQuery.Method.DEFAULT
self.triggering_frequency = triggering_frequency
self.with_auto_sharding = with_auto_sharding
self.insert_retry_strategy = insert_retry_strategy
self._validate = validate
self._temp_file_format = temp_file_format or bigquery_tools.FileFormat.JSON
self.additional_bq_parameters = additional_bq_parameters or {}
self.table_side_inputs = table_side_inputs or ()
self.schema_side_inputs = schema_side_inputs or ()
self._ignore_insert_ids = ignore_insert_ids
# Dict/schema methods were moved to bigquery_tools, but keep references
# here for backward compatibility.
get_table_schema_from_string = \
staticmethod(bigquery_tools.get_table_schema_from_string)
table_schema_to_dict = staticmethod(bigquery_tools.table_schema_to_dict)
get_dict_table_schema = staticmethod(bigquery_tools.get_dict_table_schema)
def _compute_method(self, experiments, is_streaming_pipeline):
# If the new BQ sink is not activated for experiment flags, then we use
# streaming inserts by default (it gets overridden in dataflow_runner.py).
if self.method == self.Method.DEFAULT and is_streaming_pipeline:
return self.Method.STREAMING_INSERTS
elif self.method == self.Method.DEFAULT and not is_streaming_pipeline:
return self.Method.FILE_LOADS
else:
return self.method
def expand(self, pcoll):
p = pcoll.pipeline
if (isinstance(self.table_reference, TableReference) and
self.table_reference.projectId is None):
self.table_reference.projectId = pcoll.pipeline.options.view_as(
GoogleCloudOptions).project
# TODO(pabloem): Use a different method to determine if streaming or batch.
is_streaming_pipeline = p.options.view_as(StandardOptions).streaming
if not is_streaming_pipeline and self.with_auto_sharding:
raise ValueError(
'with_auto_sharding is not applicable to batch pipelines.')
experiments = p.options.view_as(DebugOptions).experiments or []
method_to_use = self._compute_method(experiments, is_streaming_pipeline)
if method_to_use == WriteToBigQuery.Method.STREAMING_INSERTS:
if self.schema == SCHEMA_AUTODETECT:
raise ValueError(
'Schema auto-detection is not supported for streaming '
'inserts into BigQuery. Only for File Loads.')
if self.triggering_frequency:
raise ValueError(
'triggering_frequency can only be used with '
'FILE_LOADS method of writing to BigQuery.')
outputs = pcoll | _StreamToBigQuery(
table_reference=self.table_reference,
table_side_inputs=self.table_side_inputs,
schema_side_inputs=self.schema_side_inputs,
schema=self.schema,
batch_size=self.batch_size,
create_disposition=self.create_disposition,
write_disposition=self.write_disposition,
kms_key=self.kms_key,
retry_strategy=self.insert_retry_strategy,
additional_bq_parameters=self.additional_bq_parameters,
ignore_insert_ids=self._ignore_insert_ids,
with_auto_sharding=self.with_auto_sharding,
test_client=self.test_client)
return {BigQueryWriteFn.FAILED_ROWS: outputs[BigQueryWriteFn.FAILED_ROWS]}
else:
if self._temp_file_format == bigquery_tools.FileFormat.AVRO:
if self.schema == SCHEMA_AUTODETECT:
raise ValueError(
'Schema auto-detection is not supported when using Avro based '
'file loads into BigQuery. Please specify a schema or set '
'temp_file_format="NEWLINE_DELIMITED_JSON"')
if self.schema is None:
raise ValueError(
'A schema must be provided when writing to BigQuery using '
'Avro based file loads')
from apache_beam.io.gcp import bigquery_file_loads
return pcoll | bigquery_file_loads.BigQueryBatchFileLoads(
destination=self.table_reference,
schema=self.schema,
create_disposition=self.create_disposition,
write_disposition=self.write_disposition,
triggering_frequency=self.triggering_frequency,
with_auto_sharding=self.with_auto_sharding,
temp_file_format=self._temp_file_format,
max_file_size=self.max_file_size,
max_files_per_bundle=self.max_files_per_bundle,
custom_gcs_temp_location=self.custom_gcs_temp_location,
test_client=self.test_client,
table_side_inputs=self.table_side_inputs,
schema_side_inputs=self.schema_side_inputs,
additional_bq_parameters=self.additional_bq_parameters,
validate=self._validate,
is_streaming_pipeline=is_streaming_pipeline)
def display_data(self):
res = {}
if self.table_reference is not None and isinstance(self.table_reference,
TableReference):
tableSpec = '{}.{}'.format(
self.table_reference.datasetId, self.table_reference.tableId)
if self.table_reference.projectId is not None:
tableSpec = '{}:{}'.format(self.table_reference.projectId, tableSpec)
res['table'] = DisplayDataItem(tableSpec, label='Table')
return res
def to_runner_api_parameter(self, context):
from apache_beam.internal import pickler
# It'd be nice to name these according to their actual
# names/positions in the original argument list, but such a
# transformation is currently irreversible given how
# remove_objects_from_args and insert_values_in_args
# are currently implemented.
def serialize(side_inputs):
return {(SIDE_INPUT_PREFIX + '%s') % ix:
si.to_runner_api(context).SerializeToString()
for ix,
si in enumerate(side_inputs)}
table_side_inputs = serialize(self.table_side_inputs)
schema_side_inputs = serialize(self.schema_side_inputs)
config = {
'table': self._table,
'dataset': self._dataset,
'project': self._project,
'schema': self.schema,
'create_disposition': self.create_disposition,
'write_disposition': self.write_disposition,
'kms_key': self.kms_key,
'batch_size': self.batch_size,
'max_file_size': self.max_file_size,
'max_files_per_bundle': self.max_files_per_bundle,
'custom_gcs_temp_location': self.custom_gcs_temp_location,
'method': self.method,
'insert_retry_strategy': self.insert_retry_strategy,
'additional_bq_parameters': self.additional_bq_parameters,
'table_side_inputs': table_side_inputs,
'schema_side_inputs': schema_side_inputs,
'triggering_frequency': self.triggering_frequency,
'validate': self._validate,
'temp_file_format': self._temp_file_format,
'ignore_insert_ids': self._ignore_insert_ids,
'with_auto_sharding': self.with_auto_sharding,
}
return 'beam:transform:write_to_big_query:v0', pickler.dumps(config)
@PTransform.register_urn('beam:transform:write_to_big_query:v0', bytes)
def from_runner_api(unused_ptransform, payload, context):
from apache_beam.internal import pickler
from apache_beam.portability.api.beam_runner_api_pb2 import SideInput
config = pickler.loads(payload)
def deserialize(side_inputs):
deserialized_side_inputs = {}
for k, v in side_inputs.items():
side_input = SideInput()
side_input.ParseFromString(v)
deserialized_side_inputs[k] = side_input
# This is an ordered list stored as a dict (see the comments in
# to_runner_api_parameter above).
indexed_side_inputs = [(
get_sideinput_index(tag),
pvalue.AsSideInput.from_runner_api(si, context)) for tag,
si in deserialized_side_inputs.items()]
return [si for _, si in sorted(indexed_side_inputs)]
config['table_side_inputs'] = deserialize(config['table_side_inputs'])
config['schema_side_inputs'] = deserialize(config['schema_side_inputs'])
return WriteToBigQuery(**config)
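# A minimal usage sketch for WriteToBigQuery (illustrative only, not part of
# this module). The project, dataset, table, bucket, and schema names below
# are assumed placeholders.
#
# import apache_beam as beam
#
# with beam.Pipeline() as p:
#   _ = (
#       p
#       | beam.Create([{'name': 'a', 'count': 1}, {'name': 'b', 'count': 2}])
#       | beam.io.WriteToBigQuery(
#           table='my-project:my_dataset.my_table',
#           schema='name:STRING,count:INTEGER',
#           create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
#           write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
#           custom_gcs_temp_location='gs://my-bucket/tmp'))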
class ReadFromBigQuery(PTransform):
"""Read data from BigQuery.
This PTransform uses a BigQuery export job to take a snapshot of the table
on GCS, and then reads from each produced file. File format is Avro by
default.
Args:
table (str, callable, ValueProvider): The ID of the table, or a callable
that returns it. The ID must contain only letters ``a-z``, ``A-Z``,
numbers ``0-9``, or underscores ``_``. If dataset argument is
:data:`None` then the table argument must contain the entire table
reference specified as: ``'DATASET.TABLE'``
or ``'PROJECT:DATASET.TABLE'``. If it's a callable, it must receive one
argument representing an element to be written to BigQuery, and return
a TableReference, or a string table name as specified above.
dataset (str): The ID of the dataset containing this table or
:data:`None` if the table reference is specified entirely by the table
argument.
project (str): The ID of the project containing this table.
query (str, ValueProvider): A query to be used instead of arguments
table, dataset, and project.
validate (bool): If :data:`True`, various checks will be done when source
gets initialized (e.g., is table present?). This should be
:data:`True` for most scenarios in order to catch errors as early as
possible (pipeline construction instead of pipeline execution). It
should be :data:`False` if the table is created during pipeline
execution by a previous step.
coder (~apache_beam.coders.coders.Coder): The coder for the table
rows. If :data:`None`, then the default coder is
_JsonToDictCoder, which will interpret every row as a JSON
serialized dictionary.
use_standard_sql (bool): Specifies whether to use BigQuery's standard SQL
dialect for this query. The default value is :data:`False`.
If set to :data:`True`, the query will use BigQuery's updated SQL
dialect with improved standards compliance.
This parameter is ignored for table inputs.
flatten_results (bool): Flattens all nested and repeated fields in the
query results. The default value is :data:`True`.
kms_key (str): Optional Cloud KMS key name for use when creating new
temporary tables.
gcs_location (str, ValueProvider): The name of the Google Cloud Storage
bucket where the extracted table should be written as a string or
a :class:`~apache_beam.options.value_provider.ValueProvider`. If
:data:`None`, then the temp_location parameter is used.
bigquery_job_labels (dict): A dictionary with string labels to be passed
to BigQuery export and query jobs created by this transform. See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/\
Job#JobConfiguration
use_json_exports (bool): By default, this transform works by exporting
BigQuery data into Avro files, and reading those files. With this
parameter, the transform will instead export to JSON files. JSON files
are slower to read due to their larger size.
When using JSON exports, the BigQuery types for DATE, DATETIME, TIME, and
TIMESTAMP will be exported as strings. This behavior is consistent with
BigQuerySource.
When using Avro exports, these fields will be exported as native Python
types (datetime.date, datetime.datetime, datetime.time,
and datetime.datetime respectively). Avro exports are recommended.
To learn more about BigQuery types, and Time-related type
representations, see: https://cloud.google.com/bigquery/docs/reference/\
standard-sql/data-types
To learn more about type conversions between BigQuery and Avro, see:
https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-avro\
#avro_conversions
temp_dataset (``apache_beam.io.gcp.internal.clients.bigquery.\
DatasetReference``):
The dataset in which to create temporary tables when performing file
loads. By default, a new dataset is created in the execution project for
temporary tables.
"""
COUNTER = 0
def __init__(self, gcs_location=None, *args, **kwargs):
if gcs_location:
if not isinstance(gcs_location, (str, ValueProvider)):
raise TypeError(
'%s: gcs_location must be of type string'
' or ValueProvider; got %r instead' %
(self.__class__.__name__, type(gcs_location)))
if isinstance(gcs_location, str):
gcs_location = StaticValueProvider(str, gcs_location)
self.gcs_location = gcs_location
self._args = args
self._kwargs = kwargs
def expand(self, pcoll):
# TODO(BEAM-11115): Make ReadFromBQ rely on ReadAllFromBQ implementation.
temp_location = pcoll.pipeline.options.view_as(
GoogleCloudOptions).temp_location
job_name = pcoll.pipeline.options.view_as(GoogleCloudOptions).job_name
gcs_location_vp = self.gcs_location
unique_id = str(uuid.uuid4())[0:10]
def file_path_to_remove(unused_elm):
gcs_location = bigquery_export_destination_uri(
gcs_location_vp, temp_location, unique_id, True)
return gcs_location + '/'
files_to_remove_pcoll = beam.pvalue.AsList(
pcoll.pipeline
| 'FilesToRemoveImpulse' >> beam.Create([None])
| 'MapFilesToRemove' >> beam.Map(file_path_to_remove))
try:
step_name = self.label
except AttributeError:
step_name = 'ReadFromBigQuery_%d' % ReadFromBigQuery.COUNTER
ReadFromBigQuery.COUNTER += 1
return (
pcoll
| beam.io.Read(
_CustomBigQuerySource(
gcs_location=self.gcs_location,
pipeline_options=pcoll.pipeline.options,
job_name=job_name,
step_name=step_name,
unique_id=unique_id,
*self._args,
**self._kwargs))
| _PassThroughThenCleanup(files_to_remove_pcoll))
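# A minimal usage sketch for ReadFromBigQuery (illustrative only). The query
# and the temp bucket are assumed placeholders; gcs_location falls back to the
# pipeline's temp_location when omitted.
#
# import apache_beam as beam
#
# with beam.Pipeline() as p:
#   _ = (
#       p
#       | beam.io.ReadFromBigQuery(
#           query='SELECT name, event_count '
#                 'FROM `my-project.my_dataset.my_table`',
#           use_standard_sql=True,
#           gcs_location='gs://my-bucket/tmp')
#       | beam.Map(print))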
class ReadFromBigQueryRequest:
"""
Class that defines data to read from BQ.
"""
def __init__(
self,
query: str = None,
use_standard_sql: bool = True,
table: Union[str, TableReference] = None,
flatten_results: bool = False):
"""
Only one of query or table should be specified.
:param query: SQL query to fetch data.
:param use_standard_sql:
Specifies whether to use BigQuery's standard SQL dialect for this query.
The default value is :data:`True`. If set to :data:`False`,
the query will use BigQuery's legacy SQL dialect.
This parameter is ignored for table inputs.
:param table:
The ID of the table to read. The ID must contain only letters
``a-z``, ``A-Z``, numbers ``0-9``, or underscores ``_``. Table should
define project and dataset (ex.: ``'PROJECT:DATASET.TABLE'``).
:param flatten_results:
Flattens all nested and repeated fields in the query results.
The default value is :data:`False`.
"""
self.flatten_results = flatten_results
self.query = query
self.use_standard_sql = use_standard_sql
self.table = table
self.validate()
# We use this internal object ID to generate BigQuery export directories.
self.obj_id = random.randint(0, 100000)
def validate(self):
if self.table is not None and self.query is not None:
raise ValueError(
'Both a BigQuery table and a query were specified.'
' Please specify only one of these.')
elif self.table is None and self.query is None:
raise ValueError('A BigQuery table or a query must be specified')
if self.table is not None:
if isinstance(self.table, str):
assert self.table.find('.') != -1, (
'Expected a table reference '
'(PROJECT:DATASET.TABLE or DATASET.TABLE) instead of %s'
% self.table)
@experimental()
class ReadAllFromBigQuery(PTransform):
"""Read data from BigQuery.
PTransform:ReadFromBigQueryRequest->Rows
This PTransform uses a BigQuery export job to take a snapshot of the table
on GCS, and then reads from each produced file. Data is exported into
a new subdirectory for each export using UUIDs generated in
`ReadFromBigQueryRequest` objects.
It is recommended not to use this PTransform for streaming jobs on
GlobalWindow, since it will not be able to clean up snapshots.
Args:
gcs_location (str): The name of the Google Cloud Storage
bucket where the extracted table should be written as a string. If
:data:`None`, then the temp_location parameter is used.
validate (bool): If :data:`True`, various checks will be done when source
gets initialized (e.g., is table present?).
kms_key (str): Experimental. Optional Cloud KMS key name for use when
creating new temporary tables.
"""
COUNTER = 0
def __init__(
self,
gcs_location: Union[str, ValueProvider] = None,
validate: bool = False,
kms_key: str = None,
temp_dataset: Union[str, DatasetReference] = None,
bigquery_job_labels: Dict[str, str] = None):
if gcs_location:
if not isinstance(gcs_location, (str, ValueProvider)):
raise TypeError(
'%s: gcs_location must be of type string'
' or ValueProvider; got %r instead' %
(self.__class__.__name__, type(gcs_location)))
self.gcs_location = gcs_location
self.validate = validate
self.kms_key = kms_key
self.bigquery_job_labels = bigquery_job_labels
self.temp_dataset = temp_dataset
def expand(self, pcoll):
job_name = pcoll.pipeline.options.view_as(GoogleCloudOptions).job_name
project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project
unique_id = str(uuid.uuid4())[0:10]
try:
step_name = self.label
except AttributeError:
step_name = 'ReadAllFromBigQuery_%d' % ReadAllFromBigQuery.COUNTER
ReadAllFromBigQuery.COUNTER += 1
sources_to_read, cleanup_locations = (
pcoll
| beam.ParDo(
_BigQueryReadSplit(
options=pcoll.pipeline.options,
gcs_location=self.gcs_location,
bigquery_job_labels=self.bigquery_job_labels,
job_name=job_name,
step_name=step_name,
unique_id=unique_id,
kms_key=self.kms_key,
project=project,
temp_dataset=self.temp_dataset)).with_outputs(
"location_to_cleanup", main="files_to_read")
)
return (
sources_to_read
| SDFBoundedSourceReader(data_to_display=self.display_data())
| _PassThroughThenCleanup(beam.pvalue.AsIter(cleanup_locations)))
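# Illustrative sketch of ReadAllFromBigQuery driven by a PCollection of
# ReadFromBigQueryRequest objects (table and query values are assumed
# placeholders; import paths may differ between Beam versions).
#
# import apache_beam as beam
#
# with beam.Pipeline() as p:
#   requests = p | beam.Create([
#       ReadFromBigQueryRequest(query='SELECT * FROM `my-project.ds.table1`'),
#       ReadFromBigQueryRequest(table='my-project:ds.table2'),
#   ])
#   rows = requests | ReadAllFromBigQuery(gcs_location='gs://my-bucket/tmp')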
| apache-2.0 |
jacobajit/ion | intranet/apps/eighth/forms/admin/blocks.py | 1 | 1644 | # -*- coding: utf-8 -*-
from django import forms
from django.core.validators import RegexValidator
from ...models import EighthBlock
block_letter_validator = RegexValidator(r"^[a-z A-Z0-9_-]{1,10}$",
"A block letter must be less than 10 characters long, and include only alphanumeric characters and spaces.")
class BlockDisplayField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return "{}: {}".format(obj.id, str(obj))
class BlockSelectionForm(forms.Form):
def __init__(self, label="Block", exclude_before_date=None, only_locked=False, *args, **kwargs):
super(BlockSelectionForm, self).__init__(*args, **kwargs)
filter_params = {}
if exclude_before_date is not None:
filter_params["date__gte"] = exclude_before_date
if only_locked:
filter_params["locked"] = True
queryset = EighthBlock.objects.filter(**filter_params)
self.fields["block"] = BlockDisplayField(queryset=queryset, label=label, empty_label="Select a block")
class QuickBlockForm(forms.ModelForm):
block_letter = forms.CharField(max_length=10, validators=[block_letter_validator])
class Meta:
model = EighthBlock
fields = ["date", "block_letter"]
class BlockForm(forms.ModelForm):
block_letter = forms.CharField(max_length=10, validators=[block_letter_validator])
class Meta:
model = EighthBlock
fields = [
"date",
"block_letter",
"locked",
# "override_blocks",
"signup_time",
"comments"
]
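# Illustrative sketch (not part of this module): building the selection form
# for upcoming, locked blocks, e.g. inside a view. The label and date are
# assumed example values.
#
# from datetime import date
# form = BlockSelectionForm(label="Locked blocks",
#                           exclude_before_date=date.today(),
#                           only_locked=True)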
| gpl-2.0 |
ScottBuchanan/eden | modules/s3/s3codecs/svg.py | 14 | 9708 | # -*- coding: utf-8 -*-
"""
S3 SVG codec
@copyright: 2013-14 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3SVG",)
import os
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.contenttype import contenttype
from gluon.storage import Storage
from gluon.streamer import DEFAULT_CHUNK_SIZE
from ..s3codec import S3Codec
from ..s3utils import s3_unicode, s3_strip_markup
# =============================================================================
class S3SVG(S3Codec):
"""
Simple SVG format codec
"""
# -------------------------------------------------------------------------
def __init__(self):
"""
Constructor
"""
pass
# -------------------------------------------------------------------------
def extractResource(self, resource, list_fields):
"""
Extract the items from the resource
@param resource: the resource
@param list_fields: fields to include in list views
"""
title = self.crud_string(resource.tablename, "title_list")
get_vars = Storage(current.request.get_vars)
get_vars["iColumns"] = len(list_fields)
query, orderby, left = resource.datatable_filter(list_fields, get_vars)
resource.add_filter(query)
data = resource.select(list_fields,
left=left,
limit=None,
orderby=orderby,
represent=True,
show_links=False)
rfields = data["rfields"]
types = []
colnames = []
heading = {}
for rfield in rfields:
if rfield.show:
colnames.append(rfield.colname)
heading[rfield.colname] = rfield.label
if rfield.virtual:
types.append("string")
else:
types.append(rfield.ftype)
items = data["rows"]
return (title, types, colnames, heading, items)
# -------------------------------------------------------------------------
def encode(self, resource, **attr):
"""
Export data as a Scalable Vector Graphic
@param resource: the source of the data that is to be encoded
as an SVG. This may be:
resource: the resource
item: a list of pre-fetched values
the headings are in the first row
the data types are in the second row
@param attr: dictionary of parameters:
* title: The export filename
* list_fields: Fields to include in list views
"""
# Get the attributes
#list_fields = attr.get("list_fields")
#if not list_fields:
# list_fields = resource.list_fields()
# @ToDo: PostGIS can extract SVG from DB (like GeoJSON)
# http://postgis.refractions.net/documentation/manual-1.4/ST_AsSVG.html
if resource.prefix == "gis" and resource.name == "location":
#list_fields.append("wkt")
list_fields = ["wkt"]
elif "location_id$wkt" not in list_fields:
#list_fields.append("location_id$wkt")
list_fields = ["location_id$wkt"]
# Clear the WKT represent
current.s3db.gis_location.wkt.represent = None
# Extract the data from the resource
(_title, types, lfields, headers, items) = self.extractResource(resource,
list_fields)
# @ToDo: Support multiple records
wkt = items[0]["gis_location.wkt"]
if not wkt:
current.log.error("No Geometry!")
# Convert to SVG
title = attr.get("title", resource._ids[0])
filename = "%s.svg" % title
filepath = self.write_file(filename, wkt, **attr)
# Response headers
disposition = "attachment; filename=\"%s\"" % filename
response = current.response
response.headers["Content-Type"] = contenttype(".svg")
response.headers["Content-disposition"] = disposition
stream = open(filepath)
return response.stream(stream, chunk_size=DEFAULT_CHUNK_SIZE,
request=current.request)
# -------------------------------------------------------------------------
@staticmethod
def write_file(filename, wkt, **attr):
from xml.etree import ElementTree as et
# Create an SVG XML element
# @ToDo: Allow customisation of height/width
iheight = 74
height = str(iheight)
iwidth = 74
width = str(iwidth)
doc = et.Element("svg", width=width, height=height, version="1.1", xmlns="http://www.w3.org/2000/svg")
# Convert WKT
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
current.log.info("S3GIS",
"Upgrade Shapely for Performance enhancements")
shape = wkt_loads(wkt)
geom_type = shape.geom_type
if geom_type not in ("MultiPolygon", "Polygon"):
current.log.error("Unsupported Geometry", geom_type)
return
# Scale Points & invert Y axis
from shapely import affinity
bounds = shape.bounds # (minx, miny, maxx, maxy)
swidth = abs(bounds[2] - bounds[0])
sheight = abs(bounds[3] - bounds[1])
width_multiplier = iwidth / swidth
height_multiplier = iheight / sheight
multiplier = min(width_multiplier, height_multiplier) * 0.9 # Padding
shape = affinity.scale(shape, xfact=multiplier, yfact=-multiplier, origin="centroid")
# Center Shape
centroid = shape.centroid
xoff = (iwidth / 2) - centroid.x
yoff = (iheight / 2) - centroid.y
shape = affinity.translate(shape, xoff=xoff, yoff=yoff)
if geom_type == "MultiPolygon":
polygons = shape.geoms
elif geom_type == "Polygon":
polygons = [shape]
# @ToDo:
#elif geom_type == "LineString":
# _points = shape
#elif geom_type == "Point":
# _points = [shape]
points = []
pappend = points.append
for polygon in polygons:
_points = polygon.exterior.coords
for point in _points:
pappend("%s,%s" % (point[0], point[1]))
points = " ".join(points)
# Wrap in Square for Icon
# @ToDo: Anti-Aliased Rounded Corners
# @ToDo: Make optional
fill = "rgb(167, 192, 210)"
stroke = "rgb(114, 129, 145)"
et.SubElement(doc, "rect", width=width, height=height, fill=fill, stroke=stroke)
# @ToDo: Allow customisation of options
fill = "rgb(225, 225, 225)"
stroke = "rgb(165, 165, 165)"
et.SubElement(doc, "polygon", points=points, fill=fill, stroke=stroke)
# @ToDo: Add Attributes from list_fields
# Write out File
path = os.path.join(current.request.folder, "static", "cache", "svg")
if not os.path.exists(path):
os.makedirs(path)
filepath = os.path.join(path, filename)
with open(filepath, "w") as f:
# ElementTree 1.2 doesn't write the XML declaration or SVG DOCTYPE header, so write them manually
f.write("<?xml version=\"1.0\" standalone=\"no\"?>\n")
f.write("<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\n")
f.write("\"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n")
f.write(et.tostring(doc))
return filepath
# -------------------------------------------------------------------------
def decode(self, resource, source, **attr):
"""
Import data from a Scalable Vector Graphic
@param resource: the S3Resource
@param source: the source
@return: an S3XML ElementTree
@ToDo: Handle encodings within SVG other than UTF-8
"""
# @ToDo: Complete this!
raise NotImplementedError
# End =========================================================================
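# Illustrative sketch of the scale-and-center step used in write_file() above,
# shown standalone with shapely. The 74x74 canvas matches the hardcoded icon
# size; the WKT string is an assumed example.
#
# from shapely import affinity
# from shapely.wkt import loads as wkt_loads
#
# shape = wkt_loads("POLYGON((0 0, 4 0, 4 2, 0 2, 0 0))")
# minx, miny, maxx, maxy = shape.bounds
# multiplier = min(74.0 / abs(maxx - minx), 74.0 / abs(maxy - miny)) * 0.9
# # A negative yfact flips the Y axis, since SVG y grows downwards
# shape = affinity.scale(shape, xfact=multiplier, yfact=-multiplier,
#                        origin="centroid")
# centroid = shape.centroid
# shape = affinity.translate(shape, xoff=37 - centroid.x, yoff=37 - centroid.y)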
| mit |
HoracioAlvarado/fwd | venv/Lib/site-packages/pip/_vendor/packaging/version.py | 1151 | 11556 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
# We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
# which uses the defacto standard originally implemented by setuptools,
# as before all PEP 440 versions.
epoch = -1
# This scheme is taken from pkg_resources.parse_version of setuptools, prior
# to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we'll reverse the list, drop all the now-leading
# zeros until we come to something non-zero, take the rest, re-reverse it back
# into the correct order, and make it a tuple to use as our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
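# Illustrative usage of this module's public API (version strings are
# arbitrary examples):
#
# v1 = parse("1.0.dev0")
# v2 = parse("1.0a1")
# v3 = parse("1.0.post1+local.7")
# assert isinstance(v1, Version) and v1 < v2 < v3
# assert v3.public == "1.0.post1" and v3.local == "local.7"
# assert isinstance(parse("not a pep 440 version"), LegacyVersion)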
| mit |
tpodowd/boto | tests/integration/sqs/test_cert_verification.py | 126 | 1535 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on SQS endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.sqs
class SQSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
sqs = True
regions = boto.sqs.regions()
def sample_service_call(self, conn):
conn.get_all_queues()
| mit |
Natim/sentry | src/sentry/migrations/0003_auto__add_field_message_group__del_field_groupedmessage_server_name.py | 36 | 4187 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Message.group'
db.add_column('sentry_message', 'group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(blank=True, related_name='message_set', null=True, to=orm['sentry.GroupedMessage']), keep_default=False)
# Deleting field 'GroupedMessage.server_name'
db.delete_column('sentry_groupedmessage', 'server_name')
def backwards(self, orm):
# Deleting field 'Message.group'
db.delete_column('sentry_message', 'group_id')
# Adding field 'GroupedMessage.server_name'
db.add_column('sentry_groupedmessage', 'server_name', self.gf('django.db.models.fields.CharField')(default='', max_length=128, db_index=True), keep_default=False)
models = {
'sentry.groupedmessage': {
'Meta': {'unique_together': "(('logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'sentry.message': {
'Meta': {'object_name': 'Message'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'message_set'", 'null': 'True', 'to': "orm['sentry.GroupedMessage']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
peinguin/imguploader | client/lib/jQuery-File-Upload-9.5.0/server/gae-python/main.py | 168 | 5596 | # -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 2.1.0
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json
import re
import urllib
import webapp2
WEBSITE = 'http://blueimp.github.io/jQuery-File-Upload/'
MIN_FILE_SIZE = 1 # bytes
MAX_FILE_SIZE = 5000000 # bytes
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80' # max width / height
EXPIRATION_TIME = 300 # seconds
def cleanup(blob_keys):
blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
def initialize(self, request, response):
super(UploadHandler, self).initialize(request, response)
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers[
'Access-Control-Allow-Methods'
] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'
self.response.headers[
'Access-Control-Allow-Headers'
] = 'Content-Type, Content-Range, Content-Disposition'
def validate(self, file):
if file['size'] < MIN_FILE_SIZE:
file['error'] = 'File is too small'
elif file['size'] > MAX_FILE_SIZE:
file['error'] = 'File is too big'
elif not ACCEPT_FILE_TYPES.match(file['type']):
file['error'] = 'Filetype not allowed'
else:
return True
return False
def get_file_size(self, file):
file.seek(0, 2) # Seek to the end of the file
size = file.tell() # Get the position of EOF
file.seek(0) # Reset the file position to the beginning
return size
def write_blob(self, data, info):
blob = files.blobstore.create(
mime_type=info['type'],
_blobinfo_uploaded_filename=info['name']
)
with files.open(blob, 'a') as f:
f.write(data)
files.finalize(blob)
return files.blobstore.get_blob_key(blob)
def handle_upload(self):
results = []
blob_keys = []
for name, fieldStorage in self.request.POST.items():
if type(fieldStorage) is unicode:
continue
result = {}
result['name'] = re.sub(
r'^.*\\',
'',
fieldStorage.filename
)
result['type'] = fieldStorage.type
result['size'] = self.get_file_size(fieldStorage.file)
if self.validate(result):
blob_key = str(
self.write_blob(fieldStorage.value, result)
)
blob_keys.append(blob_key)
result['deleteType'] = 'DELETE'
result['deleteUrl'] = self.request.host_url +\
'/?key=' + urllib.quote(blob_key, '')
if (IMAGE_TYPES.match(result['type'])):
try:
result['url'] = images.get_serving_url(
blob_key,
secure_url=self.request.host_url.startswith(
'https'
)
)
result['thumbnailUrl'] = result['url'] +\
THUMBNAIL_MODIFICATOR
except: # Could not get an image serving url
pass
if 'url' not in result:
result['url'] = self.request.host_url +\
'/' + blob_key + '/' + urllib.quote(
result['name'].encode('utf-8'), '')
results.append(result)
deferred.defer(
cleanup,
blob_keys,
_countdown=EXPIRATION_TIME
)
return results
def options(self):
pass
def head(self):
pass
def get(self):
self.redirect(WEBSITE)
def post(self):
if (self.request.get('_method') == 'DELETE'):
return self.delete()
result = {'files': self.handle_upload()}
s = json.dumps(result, separators=(',', ':'))
redirect = self.request.get('redirect')
if redirect:
return self.redirect(str(
redirect.replace('%s', urllib.quote(s, ''), 1)
))
if 'application/json' in self.request.headers.get('Accept'):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(s)
def delete(self):
blobstore.delete(self.request.get('key') or '')
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
def get(self, key, filename):
if not blobstore.get(key):
self.error(404)
else:
# Prevent browsers from MIME-sniffing the content-type:
self.response.headers['X-Content-Type-Options'] = 'nosniff'
# Cache for the expiration time:
self.response.headers['Cache-Control'] = 'public,max-age=%d' % EXPIRATION_TIME
# Send the file forcing a download dialog:
self.send_blob(key, save_as=filename, content_type='application/octet-stream')
app = webapp2.WSGIApplication(
[
('/', UploadHandler),
('/([^/]+)/([^/]+)', DownloadHandler)
],
debug=True
)
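# Illustrative client-side sketch (not part of this app): posting a file to
# UploadHandler with the `requests` library. The host is an assumed local
# dev_appserver instance; 'files[]' is the field name used by the jQuery
# File Upload frontend, but any multipart field name works with this handler.
#
# import requests
# r = requests.post('http://localhost:8080/',
#                   files={'files[]': open('photo.png', 'rb')},
#                   headers={'Accept': 'application/json'})
# print(r.json()['files'][0].get('url'))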
| mit |
40223101/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/difflib.py | 737 | 82544 | #! /usr/bin/env python3
"""
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
Class HtmlDiff:
For producing HTML side by side comparison with change highlights.
"""
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
'unified_diff', 'HtmlDiff', 'Match']
import warnings
import heapq
from collections import namedtuple as _namedtuple
Match = _namedtuple('Match', 'a b size')
def _calculate_ratio(matches, length):
if length:
return 2.0 * matches / length
return 1.0
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches people's eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print(round(s.ratio(), 3))
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print("a[%d] and b[%d] match for %d elements" % block)
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print("%6s a[%d:%d] b[%d:%d]" % opcode)
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a='', b='', autojunk=True):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
Optional arg autojunk should be set to False to disable the
"automatic junk heuristic" that treats popular elements as junk
(see module documentation for more information).
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk and popular elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use "in self.bjunk".
# bjunk
# the items in b for which isjunk is True.
# bpopular
# nonjunk items in b treated as junk by the heuristic (if used).
self.isjunk = isjunk
self.a = self.b = None
self.autojunk = autojunk
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# b2j also does not contain entries for "popular" elements, meaning
# elements that account for more than 1 + 1% of the total elements, and
# when the sequence is reasonably large (>= 200 elements); this can
# be viewed as an adaptive notion of semi-junk, and yields an enormous
# speedup when, e.g., comparing program files with hundreds of
# instances of "return NULL;" ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# Jim Roskind, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
for i, elt in enumerate(b):
indices = b2j.setdefault(elt, [])
indices.append(i)
# Purge junk elements
self.bjunk = junk = set()
isjunk = self.isjunk
if isjunk:
for elt in b2j.keys():
if isjunk(elt):
junk.add(elt)
for elt in junk: # separate loop avoids separate list of keys
del b2j[elt]
# Purge popular elements that are not junk
self.bpopular = popular = set()
n = len(b)
if self.autojunk and n >= 200:
ntest = n // 100 + 1
for elt, idxs in b2j.items():
if len(idxs) > ntest:
popular.add(elt)
for elt in popular: # ditto; as fast for 1% deletion
del b2j[elt]
def isbjunk(self, item):
"Deprecated; use 'item in SequenceMatcher().bjunk'."
warnings.warn("'SequenceMatcher().isbjunk(item)' is deprecated;\n"
"use 'item in SMinstance.bjunk' instead.",
DeprecationWarning, 2)
return item in self.bjunk
def isbpopular(self, item):
"Deprecated; use 'item in SequenceMatcher().bpopular'."
warnings.warn("'SequenceMatcher().isbpopular(item)' is deprecated;\n"
"use 'item in SMinstance.bpopular' instead.",
DeprecationWarning, 2)
return item in self.bpopular
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=0, b=4, size=5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=1, b=0, size=4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
Match(a=0, b=0, size=0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in range(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Extend the best by non-junk elements on each end. In particular,
# "popular" non-junk elements aren't in b2j, which greatly speeds
# the inner loop above, but also means "the best" match so far
# doesn't contain any junk *or* popular non-junk elements.
while besti > alo and bestj > blo and \
not isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
not isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize += 1
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> list(s.get_matching_blocks())
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
la, lb = len(self.a), len(self.b)
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
        # (`queue`) of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j))
if i+k < ahi and j+k < bhi:
queue.append((i+k, ahi, j+k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for i2, j2, k2 in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
if k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
self.matching_blocks = non_adjacent
return map(Match._make, self.matching_blocks)
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = list(map(str, range(1,40)))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
            group.append((tag, i1, i2, j1, j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = sum(triple[-1] for triple in self.get_matching_blocks())
return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b))
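    # For example (sketch): quick_ratio() ignores ordering, so it can exceed
    # ratio() but never undershoots it.
    #   s = SequenceMatcher(None, "ab", "ba")
    #   s.ratio()             # -> 0.5  only one element matches in order
    #   s.quick_ratio()       # -> 1.0  the multisets share both 'a' and 'b'
    #   s.real_quick_ratio()  # -> 1.0  based on lengths alone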
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("Apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: %r" % (n,))
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = heapq.nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Each line of a Differ delta begins with a two-letter code:
'- ' line unique to sequence 1
'+ ' line unique to sequence 2
' ' line common to both sequences
'? ' line not present in either input sequence
Lines beginning with '? ' attempt to guide the eye to intraline
differences, and were not present in either input sequence. These lines
can be confusing if the sequences contain tab characters.
Note that Differ makes no claim to produce a *minimal* diff. To the
contrary, minimal diffs are often counter-intuitive, because they synch
up anywhere possible, sometimes accidental matches 100 pages apart.
Restricting synch points to contiguous matches preserves some notion of
locality, at the occasional cost of producing a longer diff.
Example: Comparing two texts.
First we set up the texts, sequences of individual single-line strings
ending with newlines (such sequences can also be obtained from the
`readlines()` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
... 3. Simple is better than complex.
... 4. Complex is better than complicated.
... '''.splitlines(keepends=True)
>>> len(text1)
4
>>> text1[0][-1]
'\n'
>>> text2 = ''' 1. Beautiful is better than ugly.
... 3. Simple is better than complex.
... 4. Complicated is better than complex.
... 5. Flat is better than nested.
... '''.splitlines(keepends=True)
Next we instantiate a Differ object:
>>> d = Differ()
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
Finally, we compare the two:
>>> result = list(d.compare(text1, text2))
'result' is a list of strings, so let's pretty-print it:
>>> from pprint import pprint as _pprint
>>> _pprint(result)
[' 1. Beautiful is better than ugly.\n',
'- 2. Explicit is better than implicit.\n',
'- 3. Simple is better than complex.\n',
'+ 3. Simple is better than complex.\n',
'? ++\n',
'- 4. Complex is better than complicated.\n',
'? ^ ---- ^\n',
'+ 4. Complicated is better than complex.\n',
'? ++++ ^ ^\n',
'+ 5. Flat is better than nested.\n']
As a single multi-line string it looks like this:
>>> print(''.join(result), end="")
1. Beautiful is better than ugly.
- 2. Explicit is better than implicit.
- 3. Simple is better than complex.
+ 3. Simple is better than complex.
? ++
- 4. Complex is better than complicated.
? ^ ---- ^
+ 4. Complicated is better than complex.
? ++++ ^ ^
+ 5. Flat is better than nested.
Methods:
__init__(linejunk=None, charjunk=None)
Construct a text differencer, with optional filters.
compare(a, b)
Compare two sequences of lines; generate the resulting delta.
"""
def __init__(self, linejunk=None, charjunk=None):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#'). It is recommended
to leave linejunk None; as of Python 2.3, the underlying
SequenceMatcher class has grown an adaptive notion of "noise" lines
that's better than any static definition the author has ever been
able to craft.
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!). Use of IS_CHARACTER_JUNK is recommended.
"""
self.linejunk = linejunk
self.charjunk = charjunk
def compare(self, a, b):
r"""
Compare two sequences of lines; generate the resulting delta.
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
terminated strings, ready to be printed as-is via the writeline()
method of a file-like object.
Example:
>>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True),
... 'ore\ntree\nemu\n'.splitlines(True))),
... end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
cruncher = SequenceMatcher(self.linejunk, a, b)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if tag == 'replace':
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
elif tag == 'delete':
g = self._dump('-', a, alo, ahi)
elif tag == 'insert':
g = self._dump('+', b, blo, bhi)
elif tag == 'equal':
g = self._dump(' ', a, alo, ahi)
else:
raise ValueError('unknown tag %r' % (tag,))
for line in g:
yield line
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in range(lo, hi):
yield '%s %s' % (tag, x[i])
def _plain_replace(self, a, alo, ahi, b, blo, bhi):
assert alo < ahi and blo < bhi
# dump the shorter block first -- reduces the burden on short-term
# memory if the blocks are of very different sizes
if bhi - blo < ahi - alo:
first = self._dump('+', b, blo, bhi)
second = self._dump('-', a, alo, ahi)
else:
first = self._dump('-', a, alo, ahi)
second = self._dump('+', b, blo, bhi)
for g in first, second:
for line in g:
yield line
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
... ['abcdefGhijkl\n'], 0, 1)
>>> print(''.join(results), end="")
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in range(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in range(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
yield line
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
yield line
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if eqi is None:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError('unknown tag %r' % (tag,))
for line in self._qformat(aelt, belt, atags, btags):
yield line
else:
# the synch pair is identical
yield ' ' + aelt
# pump out diffs from after the synch point
for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
yield line
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
else:
g = self._dump('-', a, alo, ahi)
elif blo < bhi:
g = self._dump('+', b, blo, bhi)
for line in g:
yield line
def _qformat(self, aline, bline, atags, btags):
r"""
Format "?" output and deal with leading tabs.
Example:
>>> d = Differ()
>>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
... ' ^ ^ ^ ', ' ^ ^ ^ ')
>>> for line in results: print(repr(line))
...
'- \tabcDefghiJkl\n'
'? \t ^ ^ ^\n'
'+ \tabcdefGhijkl\n'
'? \t ^ ^ ^\n'
"""
# Can hurt, but will probably help most of the time.
common = min(_count_leading(aline, "\t"),
_count_leading(bline, "\t"))
common = min(common, _count_leading(atags[:common], " "))
common = min(common, _count_leading(btags[:common], " "))
atags = atags[common:].rstrip()
btags = btags[common:].rstrip()
yield "- " + aline
if atags:
yield "? %s%s\n" % ("\t" * common, atags)
yield "+ " + bline
if btags:
yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
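# Roughly (a sketch; the exact "?" guide lines depend on the heuristics in
# _fancy_replace):
#   before = ['private Thread currentThread;\n']
#   after  = ['private volatile Thread currentThread;\n']
#   print(''.join(ndiff(before, after)), end="")
# now marks "volatile " as the inserted run instead of the old "e volatil".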
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
r"""
    Return True for ignorable line: iff `line` is blank or contains a single '#'.
Examples:
>>> IS_LINE_JUNK('\n')
True
>>> IS_LINE_JUNK(' # \n')
True
>>> IS_LINE_JUNK('hello\n')
False
"""
return pat(line) is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
r"""
    Return True for ignorable character: iff `ch` is a space or tab.
Examples:
>>> IS_CHARACTER_JUNK(' ')
True
>>> IS_CHARACTER_JUNK('\t')
True
>>> IS_CHARACTER_JUNK('\n')
False
>>> IS_CHARACTER_JUNK('x')
False
"""
return ch in ws
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{},{}'.format(beginning, length)
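# For example (sketch of the range strings produced above):
#   _format_range_unified(0, 1)   # -> '1'    a single line
#   _format_range_unified(3, 6)   # -> '4,3'  three lines starting at line 4
#   _format_range_unified(3, 3)   # -> '3,0'  an empty range anchored at line 3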
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a unified diff.
Unified diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with ---, +++, or @@) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The unidiff format normally has a header for filenames and modification
times. Any or all of these may be specified using strings for
'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
Example:
>>> for line in unified_diff('one two three four'.split(),
... 'zero one tree four'.split(), 'Original', 'Current',
... '2005-01-26 23:30:50', '2010-04-02 10:20:52',
... lineterm=''):
... print(line) # doctest: +NORMALIZE_WHITESPACE
--- Original 2005-01-26 23:30:50
+++ Current 2010-04-02 10:20:52
@@ -1,4 +1,4 @@
+zero
one
-two
-three
+tree
four
"""
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
yield '+++ {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
file1_range = _format_range_unified(first[1], last[2])
file2_range = _format_range_unified(first[3], last[4])
yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm)
for tag, i1, i2, j1, j2 in group:
if tag == 'equal':
for line in a[i1:i2]:
yield ' ' + line
continue
if tag in {'replace', 'delete'}:
for line in a[i1:i2]:
yield '-' + line
if tag in {'replace', 'insert'}:
for line in b[j1:j2]:
yield '+' + line
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return '{}'.format(beginning)
return '{},{}'.format(beginning, beginning + length - 1)
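# For example (sketch): the context format reports start,end rather than
# start,length.
#   _format_range_context(0, 1)   # -> '1'
#   _format_range_context(3, 6)   # -> '4,6'
#   _format_range_context(3, 3)   # -> '3'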
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a context diff.
Context diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with *** or ---) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The context diff format normally has a header for filenames and
modification times. Any or all of these may be specified using
strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
If not specified, the strings default to blanks.
Example:
>>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
... 'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')),
... end="")
*** Original
--- Current
***************
*** 1,4 ****
one
! two
! three
four
--- 1,4 ----
+ zero
one
! tree
four
"""
prefix = dict(insert='+ ', delete='- ', replace='! ', equal=' ')
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
yield '--- {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
yield '***************' + lineterm
file1_range = _format_range_context(first[1], last[2])
yield '*** {} ****{}'.format(file1_range, lineterm)
if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group):
for tag, i1, i2, _, _ in group:
if tag != 'insert':
for line in a[i1:i2]:
yield prefix[tag] + line
file2_range = _format_range_context(first[3], last[4])
yield '--- {} ----{}'.format(file2_range, lineterm)
if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group):
for tag, _, _, j1, j2 in group:
if tag != 'delete':
for line in b[j1:j2]:
yield prefix[tag] + line
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions (or None):
- linejunk: A function that should accept a single string argument, and
return true iff the string is junk. The default is None, and is
recommended; as of Python 2.3, an adaptive notion of "noise" lines is
used that does a good job on its own.
- charjunk: A function that should accept a string of length 1. The
default is module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: bad idea to include newline
in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> print(''.join(diff), end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
return Differ(linejunk, charjunk).compare(a, b)
def _mdiff(fromlines, tolines, context=None, linejunk=None,
charjunk=IS_CHARACTER_JUNK):
r"""Returns generator yielding marked up from/to side by side differences.
Arguments:
    fromlines -- list of text lines to be compared to tolines
tolines -- list of text lines to be compared to fromlines
context -- number of context lines to display on each side of difference,
if None, all from/to text lines will be generated.
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
This function returns an iterator which returns a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
This function/iterator was originally developed to generate side by side
file difference for making HTML pages (see HtmlDiff class for example
usage).
Note, this function utilizes the ndiff function to generate the side by
side difference markup. Optional ndiff arguments may be passed to this
function and they in turn will be passed to ndiff.
"""
import re
# regular expression for finding intraline change indices
    change_re = re.compile(r'(\++|\-+|\^+)')
# create the difference iterator to generate the differences
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
def _make_line(lines, format_key, side, num_lines=[0,0]):
"""Returns line of text with user's change markup and line formatting.
lines -- list of lines from the ndiff generator to produce a line of
text from. When producing the line of text to return, the
lines used are removed from this list.
format_key -- '+' return first line in list with "add" markup around
the entire line.
'-' return first line in list with "delete" markup around
the entire line.
'?' return first line in list with add/delete/change
intraline markup (indices obtained from second line)
None return first line in list with no markup
        side -- index into the num_lines list (0=from,1=to)
num_lines -- from/to current line number. This is NOT intended to be a
passed parameter. It is present as a keyword argument to
maintain memory of the current line numbers between calls
of this function.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
num_lines[side] += 1
# Handle case where no user markup is to be added, just return line of
# text with user's line format to allow for usage of the line number.
if format_key is None:
return (num_lines[side],lines.pop(0)[2:])
# Handle case of intraline changes
if format_key == '?':
text, markers = lines.pop(0), lines.pop(0)
# find intraline changes (store change type and indices in tuples)
sub_info = []
def record_sub_info(match_object,sub_info=sub_info):
sub_info.append([match_object.group(1)[0],match_object.span()])
return match_object.group(1)
change_re.sub(record_sub_info,markers)
# process each tuple inserting our special marks that won't be
# noticed by an xml/html escaper.
for key,(begin,end) in sub_info[::-1]:
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
text = text[2:]
# Handle case of add/delete entire line
else:
text = lines.pop(0)[2:]
# if line of text is just a newline, insert a space so there is
# something for the user to highlight and see.
if not text:
text = ' '
# insert marks that won't be noticed by an xml/html escaper.
text = '\0' + format_key + text + '\1'
# Return line of text, first allow user's line formatter to do its
# thing (such as adding the line number) then replace the special
        # marks with the user's change markup.
return (num_lines[side],text)
def _line_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from a
differencing iterator, processes them and yields them. When it can
it yields both a "from" and a "to" line, otherwise it will yield one
or the other. In addition to yielding the lines of from/to text, a
boolean flag is yielded to indicate if the text line(s) have
differences in them.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
lines = []
num_blanks_pending, num_blanks_to_yield = 0, 0
while True:
# Load up next 4 lines so we can look ahead, create strings which
# are a concatenation of the first character of each of the 4 lines
# so we can do some very readable comparisons.
while len(lines) < 4:
try:
lines.append(next(diff_lines_iterator))
except StopIteration:
lines.append('X')
s = ''.join([line[0] for line in lines])
if s.startswith('X'):
# When no more lines, pump out any remaining blank lines so the
# corresponding add/delete lines get a matching blank line so
# all line pairs get yielded at the next level.
num_blanks_to_yield = num_blanks_pending
elif s.startswith('-?+?'):
# simple intraline change
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
continue
elif s.startswith('--++'):
# in delete block, add block coming: we do NOT want to get
# caught up on blank lines yet, just process the delete line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith(('--?+', '--+', '- ')):
                # in delete block and see an intraline change or unchanged line
# coming: yield the delete line and then blanks
from_line,to_line = _make_line(lines,'-',0), None
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
elif s.startswith('-+?'):
# intraline change
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
continue
elif s.startswith('-?+'):
# intraline change
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
continue
elif s.startswith('-'):
# delete FROM line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith('+--'):
# in add block, delete block coming: we do NOT want to get
# caught up on blank lines yet, just process the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(('+ ', '+-')):
# will be leaving an add block: yield blanks then add line
from_line, to_line = None, _make_line(lines,'+',1)
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
elif s.startswith('+'):
# inside an add block, yield the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(' '):
# unchanged text, yield it to both sides
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
continue
# Catch up on the blank lines so when we yield the next from/to
# pair, they are lined up.
while(num_blanks_to_yield < 0):
num_blanks_to_yield += 1
yield None,('','\n'),True
while(num_blanks_to_yield > 0):
num_blanks_to_yield -= 1
yield ('','\n'),None,True
if s.startswith('X'):
                return  # normal end of input (avoids leaking StopIteration, PEP 479)
else:
yield from_line,to_line,True
def _line_pair_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from the line
iterator. Its difference from that iterator is that this function
always yields a pair of from/to text lines (with the change
indication). If necessary it will collect single from/to lines
        until it has a matching from/to pair to yield.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
line_iterator = _line_iterator()
fromlines,tolines=[],[]
while True:
# Collecting lines of text until we have a from/to pair
while (len(fromlines)==0 or len(tolines)==0):
from_line, to_line, found_diff = next(line_iterator)
if from_line is not None:
fromlines.append((from_line,found_diff))
if to_line is not None:
tolines.append((to_line,found_diff))
# Once we have a pair, remove them from the collection and yield it
from_line, fromDiff = fromlines.pop(0)
to_line, to_diff = tolines.pop(0)
yield (from_line,to_line,fromDiff or to_diff)
# Handle case where user does not want context differencing, just yield
# them up without doing anything else with them.
line_pair_iterator = _line_pair_iterator()
if context is None:
while True:
yield next(line_pair_iterator)
# Handle case where user wants context differencing. We must do some
# storage of lines until we know for sure that they are to be yielded.
else:
context += 1
lines_to_write = 0
while True:
# Store lines up until we find a difference, note use of a
# circular queue because we only need to keep around what
# we need for context.
index, contextLines = 0, [None]*(context)
found_diff = False
while(found_diff is False):
from_line, to_line, found_diff = next(line_pair_iterator)
i = index % context
contextLines[i] = (from_line, to_line, found_diff)
index += 1
# Yield lines that we have collected so far, but first yield
# the user's separator.
if index > context:
yield None, None, None
lines_to_write = context
else:
lines_to_write = index
index = 0
while(lines_to_write):
i = index % context
index += 1
yield contextLines[i]
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
while(lines_to_write):
from_line, to_line, found_diff = next(line_pair_iterator)
# If another change within the context, extend the context
if found_diff:
lines_to_write = context-1
else:
lines_to_write -= 1
yield from_line, to_line, found_diff
_file_template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=ISO-8859-1" />
<title></title>
<style type="text/css">%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""
_styles = """
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
class HtmlDiff(object):
"""For producing HTML side by side comparison with change highlights.
This class can be used to create an HTML table (or a complete HTML file
containing the table) showing a side by side, line by line comparison
of text with inter-line and intra-line change highlights. The table can
be generated in either full or contextual difference mode.
The following methods are provided for HTML generation:
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
See tools/scripts/diff.py for an example usage of this class.
"""
_file_template = _file_template
_styles = _styles
_table_template = _table_template
_legend = _legend
_default_prefix = 0
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
charjunk=IS_CHARACTER_JUNK):
"""HtmlDiff instance initializer
Arguments:
tabsize -- tab stop spacing, defaults to 8.
wrapcolumn -- column number where lines are broken and wrapped,
defaults to None where lines are not wrapped.
        linejunk,charjunk -- keyword arguments passed into ndiff() (used by
HtmlDiff() to generate the side by side HTML differences). See
ndiff() documentation for argument default values and descriptions.
"""
self._tabsize = tabsize
self._wrapcolumn = wrapcolumn
self._linejunk = linejunk
self._charjunk = charjunk
def make_file(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML file of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
return self._file_template % dict(
styles = self._styles,
legend = self._legend,
table = self.make_table(fromlines,tolines,fromdesc,todesc,
context=context,numlines=numlines))
def _tab_newline_replace(self,fromlines,tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ','\0')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ','\t')
return line.replace('\0',' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return fromlines,tolines
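    # For example (sketch, assuming the default tabsize of 8):
    #   HtmlDiff()._tab_newline_replace(['a\tb\n'], ['a b\n'])
    #   # -> (['a\t\t\t\t\t\t\tb'], ['a b'])
    # The tab is padded out to its tab stop with tab characters (turned into
    # non-breaking spaces later in the HTML output) and the newline is dropped.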
def _split_line(self,data_list,line_num,text):
"""Builds list of text lines by splitting text lines at wrap point
This function will determine if the input text line needs to be
wrapped (split) into separate lines. If so, the first wrap point
will be determined and the first line appended to the output
text line list. This function is used recursively to handle
the second part of the split line to further split it.
"""
# if blank line or context separator, just add it to the output list
if not line_num:
data_list.append((line_num,text))
return
# if line text doesn't need wrapping, just add it to the output list
size = len(text)
max = self._wrapcolumn
if (size <= max) or ((size -(text.count('\0')*3)) <= max):
data_list.append((line_num,text))
return
# scan text looking for the wrap point, keeping track if the wrap
# point is inside markers
i = 0
n = 0
mark = ''
while n < max and i < size:
if text[i] == '\0':
i += 1
mark = text[i]
i += 1
elif text[i] == '\1':
i += 1
mark = ''
else:
i += 1
n += 1
# wrap point is inside text, break it up into separate lines
line1 = text[:i]
line2 = text[i:]
# if wrap point is inside markers, place end marker at end of first
# line and start marker at beginning of second line because each
# line will have its own table tag markup around it.
if mark:
line1 = line1 + '\1'
line2 = '\0' + mark + line2
# tack on first line onto the output list
data_list.append((line_num,line1))
# use this routine again to wrap the remaining text
self._split_line(data_list,'>',line2)
def _line_wrapper(self,diffs):
"""Returns iterator that splits (wraps) mdiff text lines"""
# pull from/to data and flags from mdiff iterator
for fromdata,todata,flag in diffs:
# check for context separators and pass them through
if flag is None:
yield fromdata,todata,flag
continue
(fromline,fromtext),(toline,totext) = fromdata,todata
# for each from/to line split it at the wrap column to form
# list of text lines.
fromlist,tolist = [],[]
self._split_line(fromlist,fromline,fromtext)
self._split_line(tolist,toline,totext)
# yield from/to line in pairs inserting blank lines as
# necessary when one side has more wrapped lines
while fromlist or tolist:
if fromlist:
fromdata = fromlist.pop(0)
else:
fromdata = ('',' ')
if tolist:
todata = tolist.pop(0)
else:
todata = ('',' ')
yield fromdata,todata,flag
def _collect_lines(self,diffs):
"""Collects mdiff output into separate lists
Before storing the mdiff from/to data into a list, it is converted
into a single line of text with HTML markup.
"""
fromlist,tolist,flaglist = [],[],[]
# pull from/to data and flags from mdiff style iterator
for fromdata,todata,flag in diffs:
try:
# store HTML markup of the lines into the lists
fromlist.append(self._format_line(0,flag,*fromdata))
tolist.append(self._format_line(1,flag,*todata))
except TypeError:
# exceptions occur for lines where context separators go
fromlist.append(None)
tolist.append(None)
flaglist.append(flag)
return fromlist,tolist,flaglist
def _format_line(self,side,flag,linenum,text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side],linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
text=text.replace("&","&").replace(">",">").replace("<","<")
# make space non-breakable so they don't get compressed or line wrapped
        text = text.replace(' ','&nbsp;').rstrip()
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
% (id,linenum,text)
def _make_prefix(self):
"""Create unique anchor prefixes"""
# Generate a unique anchor prefix so multiple tables
# can exist on the same HTML page without conflicts.
fromprefix = "from%d_" % HtmlDiff._default_prefix
toprefix = "to%d_" % HtmlDiff._default_prefix
HtmlDiff._default_prefix += 1
# store prefixes so line format method has access
self._prefix = [fromprefix,toprefix]
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
"""Makes list of "next" links"""
# all anchor names will be generated using the unique "to" prefix
toprefix = self._prefix[1]
# process change flags, generating middle column of next anchors/links
next_id = ['']*len(flaglist)
next_href = ['']*len(flaglist)
num_chg, in_change = 0, False
last = 0
for i,flag in enumerate(flaglist):
if flag:
if not in_change:
in_change = True
last = i
# at the beginning of a change, drop an anchor a few lines
# (the context lines) before the change for the previous
# link
i = max([0,i-numlines])
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
# at the beginning of a change, drop a link to the next
# change
num_chg += 1
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
toprefix,num_chg)
else:
in_change = False
# check for cases where there is no content to avoid exceptions
if not flaglist:
flaglist = [False]
next_id = ['']
next_href = ['']
last = 0
if context:
                fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>']
tolist = fromlist
else:
                fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
# if not a change on first line, drop a link
if not flaglist[0]:
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
# redo the last link to link to the top
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
return fromlist,tolist,flaglist,next_href,next_id
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
# make unique anchor prefixes so that multiple tables may exist
# on the same page without conflict.
self._make_prefix()
# change tabs to spaces before it gets more difficult after we insert
# markup
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
# create diffs iterator which generates side by side from/to data
if context:
context_lines = numlines
else:
context_lines = None
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
charjunk=self._charjunk)
# set up iterator to wrap lines that exceed desired width
if self._wrapcolumn:
diffs = self._line_wrapper(diffs)
# collect up from/to lines and flags into lists (also format the lines)
fromlist,tolist,flaglist = self._collect_lines(diffs)
# process change flags, generating middle column of next anchors/links
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
fromlist,tolist,flaglist,context,numlines)
s = []
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
'<td class="diff_next">%s</td>%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
                # mdiff yields None on separator lines; skip the bogus ones
# generated for the first line
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
next_href[i],tolist[i]))
if fromdesc or todesc:
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % todesc)
else:
header_row = ''
table = self._table_template % dict(
data_rows=''.join(s),
header_row=header_row,
prefix=self._prefix[1])
return table.replace('\0+','<span class="diff_add">'). \
replace('\0-','<span class="diff_sub">'). \
replace('\0^','<span class="diff_chg">'). \
replace('\1','</span>'). \
                     replace('\t','&nbsp;')
del re
def restore(delta, which):
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> diff = list(diff)
>>> print(''.join(restore(diff, 1)), end="")
one
two
three
>>> print(''.join(restore(diff, 2)), end="")
ore
tree
emu
"""
try:
tag = {1: "- ", 2: "+ "}[int(which)]
except KeyError:
raise ValueError('unknown delta choice (must be 1 or 2): %r'
% which)
prefixes = (" ", tag)
for line in delta:
if line[:2] in prefixes:
yield line[2:]
def _test():
import doctest, difflib
return doctest.testmod(difflib)
if __name__ == "__main__":
_test()
| gpl-3.0 |
Alex-Chizhov/python_training | home_work_7/fixture/contact.py | 1 | 3938 | class ContactHelper:
def __init__(self,app):
self.app = app
def link_add_new(self):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
def input_save_form(self):
wd = self.app.wd
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
def fill_form(self, info_contact):
wd = self.app.wd
        # input firstname
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").send_keys(info_contact.firstname)
        # input middlename
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").send_keys(info_contact.middelname)
# input lastname
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").send_keys(info_contact.lastname)
# input nickname
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").send_keys(info_contact.nickname)
# input title
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").send_keys(info_contact.title)
        # input company
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").send_keys(info_contact.company)
        # input address
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").send_keys(info_contact.addres)
# input home
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").send_keys(info_contact.home)
# input mobile
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").send_keys(info_contact.mobile)
# input work
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").send_keys(info_contact.work)
# input fax
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").send_keys(info_contact.fax)
# input homepage
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").send_keys(info_contact.homepage)
# Birthday
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option%s"% info_contact.day_Birthday).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option%s" % info_contact.day_Birthday).click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option%s" % info_contact.month_Birthday).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option%s" % info_contact.month_Birthday).click()
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").send_keys(info_contact.year_Birthday)
# Anniversary
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option%s" % info_contact.day_Anniversary).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option%s" % info_contact.day_Anniversary).click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option%s" % info_contact.month_Anniversary).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option%s" % info_contact.month_Anniversary).click()
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").send_keys(info_contact.year_Anniversary)
# input address 2
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").send_keys(info_contact.address2)
# input phone 2
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").send_keys(info_contact.phone2)
# input notes
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").send_keys(info_contact.notes)
        # input photo
wd.find_element_by_name("photo").send_keys(info_contact.photo)
def delete_first_contact(self):
wd = self.app.wd
        # select the first contact
wd.find_element_by_name("selected[]").click()
        # delete the selected contact
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        # confirm deletion in the alert window
wd.switch_to_alert().accept()
        # return to the home page
wd.find_element_by_link_text("home").click() | apache-2.0 |
rruebner/odoo | openerp/tools/win32.py | 457 | 1993 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import locale
import time
import datetime
if not hasattr(locale, 'D_FMT'):
locale.D_FMT = 1
if not hasattr(locale, 'T_FMT'):
locale.T_FMT = 2
if not hasattr(locale, 'nl_langinfo'):
def nl_langinfo(param):
if param == locale.D_FMT:
val = time.strptime('30/12/2004', '%d/%m/%Y')
dt = datetime.datetime(*val[:-2])
format_date = dt.strftime('%x')
for x, y in [('30', '%d'),('12', '%m'),('2004','%Y'),('04', '%Y')]:
format_date = format_date.replace(x, y)
return format_date
if param == locale.T_FMT:
val = time.strptime('13:24:56', '%H:%M:%S')
dt = datetime.datetime(*val[:-2])
format_time = dt.strftime('%X')
for x, y in [('13', '%H'),('24', '%M'),('56','%S')]:
format_time = format_time.replace(x, y)
return format_time
locale.nl_langinfo = nl_langinfo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
maxdeliso/elevatorSim | Lib/test/test_dynamic.py | 173 | 4479 | # Test the most dynamic corner cases of Python's runtime semantics.
import builtins
import contextlib
import unittest
from test.support import run_unittest, swap_item, swap_attr
class RebindBuiltinsTests(unittest.TestCase):
"""Test all the ways that we can change/shadow globals/builtins."""
def configure_func(self, func, *args):
"""Perform TestCase-specific configuration on a function before testing.
By default, this does nothing. Example usage: spinning a function so
that a JIT will optimize it. Subclasses should override this as needed.
Args:
func: function to configure.
*args: any arguments that should be passed to func, if calling it.
Returns:
Nothing. Work will be performed on func in-place.
"""
pass
def test_globals_shadow_builtins(self):
# Modify globals() to shadow an entry in builtins.
def foo():
return len([1, 2, 3])
self.configure_func(foo)
self.assertEqual(foo(), 3)
with swap_item(globals(), "len", lambda x: 7):
self.assertEqual(foo(), 7)
def test_modify_builtins(self):
# Modify the builtins module directly.
def foo():
return len([1, 2, 3])
self.configure_func(foo)
self.assertEqual(foo(), 3)
with swap_attr(builtins, "len", lambda x: 7):
self.assertEqual(foo(), 7)
def test_modify_builtins_while_generator_active(self):
# Modify the builtins out from under a live generator.
def foo():
x = range(3)
yield len(x)
yield len(x)
self.configure_func(foo)
g = foo()
self.assertEqual(next(g), 3)
with swap_attr(builtins, "len", lambda x: 7):
self.assertEqual(next(g), 7)
def test_modify_builtins_from_leaf_function(self):
# Verify that modifications made by leaf functions percolate up the
# callstack.
with swap_attr(builtins, "len", len):
def bar():
builtins.len = lambda x: 4
def foo(modifier):
l = []
l.append(len(range(7)))
modifier()
l.append(len(range(7)))
return l
self.configure_func(foo, lambda: None)
self.assertEqual(foo(bar), [7, 4])
def test_cannot_change_globals_or_builtins_with_eval(self):
def foo():
return len([1, 2, 3])
self.configure_func(foo)
# Note that this *doesn't* change the definition of len() seen by foo().
builtins_dict = {"len": lambda x: 7}
globals_dict = {"foo": foo, "__builtins__": builtins_dict,
"len": lambda x: 8}
self.assertEqual(eval("foo()", globals_dict), 3)
self.assertEqual(eval("foo()", {"foo": foo}), 3)
def test_cannot_change_globals_or_builtins_with_exec(self):
def foo():
return len([1, 2, 3])
self.configure_func(foo)
globals_dict = {"foo": foo}
exec("x = foo()", globals_dict)
self.assertEqual(globals_dict["x"], 3)
# Note that this *doesn't* change the definition of len() seen by foo().
builtins_dict = {"len": lambda x: 7}
globals_dict = {"foo": foo, "__builtins__": builtins_dict,
"len": lambda x: 8}
exec("x = foo()", globals_dict)
self.assertEqual(globals_dict["x"], 3)
def test_cannot_replace_builtins_dict_while_active(self):
def foo():
x = range(3)
yield len(x)
yield len(x)
self.configure_func(foo)
g = foo()
self.assertEqual(next(g), 3)
with swap_item(globals(), "__builtins__", {"len": lambda x: 7}):
self.assertEqual(next(g), 3)
def test_cannot_replace_builtins_dict_between_calls(self):
def foo():
return len([1, 2, 3])
self.configure_func(foo)
self.assertEqual(foo(), 3)
with swap_item(globals(), "__builtins__", {"len": lambda x: 7}):
self.assertEqual(foo(), 3)
def test_eval_gives_lambda_custom_globals(self):
globals_dict = {"len": lambda x: 7}
foo = eval("lambda: len([])", globals_dict)
self.configure_func(foo)
self.assertEqual(foo(), 7)
def test_main():
run_unittest(RebindBuiltinsTests)
if __name__ == "__main__":
test_main()
| bsd-2-clause |
whitmo/prism | setup.py | 1 | 2106 | import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
py_version = sys.version_info[:2]
PY3 = py_version[0] == 3
here = os.path.abspath(os.path.dirname(__file__))
def _read(path):
with open(path, 'r') as fp:
return fp.read()
try:
README = _read(os.path.join(here, 'README.md'))
CHANGES = _read(os.path.join(here, 'CHANGES.rst'))
except:
README = CHANGES = ''
install_requires = [
'pyramid',
'path.py',
]
if py_version < (2, 7):
install_requires += ['argparse']
tests_require = install_requires + [
'pytest',
]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--pyargs', self.test_suite]
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import pytest
result = pytest.main(self.test_args)
sys.exit(result)
setup(
name='prism',
version='0.0',
description='',
long_description=README + '\n\n' + CHANGES,
url='https://github.com/whitmo/prism',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Operating System :: OS Independent',
'Framework :: Pyramid',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
keywords='pyramid',
author='Whit Morriss and Micheal Merickel',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=True,
install_requires=install_requires,
extras_require=dict(zkconfig=['kazoo']),
tests_require=tests_require,
test_suite='prism.tests',
cmdclass={'test': PyTest},
)
| bsd-3-clause |
OmeGak/indico | indico/legacy/pdfinterface/base.py | 2 | 25142 | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import cgi
import math
import os
import xml.sax.saxutils as saxutils
import pkg_resources
from PIL import Image as PILImage
from reportlab import platypus
from reportlab.lib.enums import TA_CENTER
from reportlab.lib.fonts import addMapping
from reportlab.lib.pagesizes import A0, A1, A2, A3, A4, A5, A6, LETTER, landscape
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.units import cm, inch
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen.canvas import Canvas
from reportlab.platypus import PageTemplate, SimpleDocTemplate
from reportlab.platypus.frames import Frame
from indico.legacy.common.utils import isStringHTML
from indico.util.i18n import _
from indico.util.string import sanitize_for_platypus, to_unicode
ratio = math.sqrt(math.sqrt(2.0))
class PDFSizes:
def __init__(self):
self.PDFpagesizes = {
'Letter': LETTER,
'A0': A3,
'A1': A3,
'A2': A3,
'A3': A3,
'A4': A4,
'A5': A5,
'A6': A6
}
def escape(text):
if text is None:
text = ""
try:
if isStringHTML(text):
text = sanitize_for_platypus(text)
else:
text = cgi.escape(text)
text = text.replace("\r\n", " <br/>")
text = text.replace("\n", " <br/>")
text = text.replace("\r", " <br/>")
return text
except Exception:
return saxutils.escape(text)
def modifiedFontSize(fontsize, lowerNormalHigher):
if lowerNormalHigher == "normal":
return fontsize
elif lowerNormalHigher == "small":
return fontsize / ratio
elif lowerNormalHigher == "large":
return fontsize * ratio
elif lowerNormalHigher == "smaller":
return (fontsize / ratio) / ratio
elif lowerNormalHigher == "x-small":
return ((fontsize / ratio) / ratio) / ratio
elif lowerNormalHigher == "xx-small":
return (((fontsize / ratio) / ratio) / ratio) / ratio
elif lowerNormalHigher == "xxx-small":
return ((((fontsize / ratio) / ratio) / ratio) / ratio) / ratio
elif lowerNormalHigher == "larger":
return fontsize * ratio * ratio
else:
return fontsize
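# Illustrative only (not part of the original module): with ratio = 2 ** 0.25,
# modifiedFontSize(12, "large") is roughly 14.3 and modifiedFontSize(12, "small")
# roughly 10.1; any unrecognised keyword falls through to the unchanged size.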
alreadyRegistered = False
def setTTFonts():
global alreadyRegistered
if not alreadyRegistered:
distribution = pkg_resources.get_distribution('indico-fonts')
font_dir = os.path.join(distribution.location, 'indico_fonts')
pdfmetrics.registerFont(TTFont('Times-Roman', os.path.join(font_dir, 'LiberationSerif-Regular.ttf')))
pdfmetrics.registerFont(TTFont('Times-Bold', os.path.join(font_dir, 'LiberationSerif-Bold.ttf')))
pdfmetrics.registerFont(TTFont('Times-Italic', os.path.join(font_dir, 'LiberationSerif-Italic.ttf')))
pdfmetrics.registerFont(TTFont('Times-Bold-Italic', os.path.join(font_dir, 'LiberationSerif-BoldItalic.ttf')))
addMapping('Times-Roman', 0, 0, 'Times-Roman')
addMapping('Times-Roman', 1, 0, 'Times-Bold')
addMapping('Times-Roman', 0, 1, 'Times-Italic')
addMapping('Times-Roman', 1, 1, 'Times-Bold-Italic')
pdfmetrics.registerFont(TTFont('Sans', os.path.join(font_dir, 'LiberationSans-Regular.ttf')))
pdfmetrics.registerFont(TTFont('Sans-Bold', os.path.join(font_dir, 'LiberationSans-Bold.ttf')))
pdfmetrics.registerFont(TTFont('Sans-Italic', os.path.join(font_dir, 'LiberationSans-Italic.ttf')))
pdfmetrics.registerFont(TTFont('Sans-Bold-Italic', os.path.join(font_dir, 'LiberationSans-BoldItalic.ttf')))
addMapping('Sans', 0, 0, 'Sans')
addMapping('Sans', 1, 0, 'Sans-Bold')
addMapping('Sans', 0, 1, 'Sans-Italic')
addMapping('Sans', 1, 1, 'Sans-Bold-Italic')
pdfmetrics.registerFont(TTFont('Courier', os.path.join(font_dir, 'LiberationMono-Regular.ttf')))
pdfmetrics.registerFont(TTFont('Courier-Bold', os.path.join(font_dir, 'LiberationMono-Bold.ttf')))
pdfmetrics.registerFont(TTFont('Courier-Italic', os.path.join(font_dir, 'LiberationMono-Italic.ttf')))
pdfmetrics.registerFont(TTFont('Courier-Bold-Italic', os.path.join(font_dir, 'LiberationMono-BoldItalic.ttf')))
addMapping('Courier', 0, 0, 'Courier')
addMapping('Courier', 1, 0, 'Courier-Bold')
addMapping('Courier', 0, 1, 'Courier-Italic')
addMapping('Courier', 1, 1, 'Courier-Bold-Italic')
pdfmetrics.registerFont(TTFont('LinuxLibertine', os.path.join(font_dir, 'LinLibertine_Rah.ttf')))
pdfmetrics.registerFont(TTFont('LinuxLibertine-Bold', os.path.join(font_dir, 'LinLibertine_RBah.ttf')))
pdfmetrics.registerFont(TTFont('LinuxLibertine-Italic', os.path.join(font_dir, 'LinLibertine_RIah.ttf')))
pdfmetrics.registerFont(TTFont('LinuxLibertine-Bold-Italic', os.path.join(font_dir, 'LinLibertine_RBIah.ttf')))
addMapping('LinuxLibertine', 0, 0, 'LinuxLibertine')
addMapping('LinuxLibertine', 1, 0, 'LinuxLibertine-Bold')
addMapping('LinuxLibertine', 0, 1, 'LinuxLibertine-Italic')
addMapping('LinuxLibertine', 1, 1, 'LinuxLibertine-Bold-Italic')
pdfmetrics.registerFont(TTFont('Kochi-Mincho', os.path.join(font_dir, 'kochi-mincho-subst.ttf')))
pdfmetrics.registerFont(TTFont('Kochi-Gothic', os.path.join(font_dir, 'kochi-gothic-subst.ttf')))
alreadyRegistered = True
def int_to_roman(value):
"""Convert an integer to Roman numerals."""
if not 0 < value < 4000:
raise ValueError( _("Int to Roman Error: Argument must be between 1 and 3999"))
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('m', 'cm', 'd', 'cd','c', 'xc','l','xl','x','ix','v','iv','i')
result = ""
for i in range(len(ints)):
count = int(value / ints[i])
result += nums[i] * count
value -= ints[i] * count
return result
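# Hypothetical doctest-style examples (added for illustration; the function
# returns lowercase numerals as built above):
#   int_to_roman(2004) -> 'mmiv'
#   int_to_roman(49)   -> 'xlix'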
class Paragraph(platypus.Paragraph):
"""
    Adds a part attribute used for drawing the name of the current part in the laterPages function.
"""
def __init__(self, text, style, part="", bulletText=None, frags=None, caseSensitive=1):
platypus.Paragraph.__init__(self, to_unicode(text), style, bulletText, frags, caseSensitive)
self._part = part
def setPart(self, part):
self._part = part
def getPart(self):
return self._part
class SimpleParagraph(platypus.Flowable):
""" Simple and fast paragraph.
    WARNING! This paragraph cannot break lines and has almost no formatting methods.
It's used only to increase PDF performance in places where normal paragraph is not needed.
"""
def __init__(self, text, fontSize = 10, indent = 0, spaceAfter = 2):
platypus.Flowable.__init__(self)
self.text = text
self.height = fontSize + spaceAfter
self.fontSize = fontSize
self.spaceAfter = spaceAfter
self.indent = indent
def __repr__(self):
return ""
def draw(self):
#centre the text
self.canv.setFont('Times-Roman',self.fontSize)
self.canv.drawString(self.indent, self.spaceAfter, self.text)
class TableOfContentsEntry(Paragraph):
"""
Class used to create table of contents entry with its number.
    Much faster than the table of contents from the platypus lib.
"""
def __init__(self, text, pageNumber, style, part="", bulletText=None, frags=None, caseSensitive=1):
Paragraph.__init__(self, to_unicode(text), style, part, bulletText, frags, caseSensitive)
self._part = part
self._pageNumber = pageNumber
def _drawDots(self):
"""
Draws row of dots from the end of the abstract title to the page number.
"""
try:
freeSpace = int(self.blPara.lines[-1][0])
except AttributeError:
# Sometimes we get an ABag instead of a tuple.. in this case we use the extraSpace attribute
# as it seems to contain just what we need.
freeSpace = int(self.blPara.lines[-1].extraSpace)
while( freeSpace > 10 ):
dot = self.beginText(self.width + 10 - freeSpace, self.style.leading - self.style.fontSize)
dot.textLine(".")
self.canv.drawText(dot)
freeSpace -= 3
def draw(self):
platypus.Paragraph.draw(self)
tx = self.beginText(self.width + 10, self.style.leading - self.style.fontSize)
tx.setFont(self.style.fontName, self.style.fontSize, 0)
tx.textLine(str(self._pageNumber))
self.canv.drawText(tx)
self._drawDots()
class Spacer(platypus.Spacer):
def __init__(self, width, height, part=""):
platypus.Spacer.__init__(self, width, height)
self._part = part
def setPart(self, part):
self._part = part
def getPart(self):
return self._part
class Image(platypus.Image):
def __init__(self, filename, part="", width=None, height=None, kind='direct', mask="auto", lazy=1):
platypus.Image.__init__(self, filename, width=None, height=None, kind='direct', mask="auto", lazy=1)
self._part = part
def setPart(self, part):
self._part = part
def getPart(self):
return self._part
class PageBreak(platypus.PageBreak):
def __init__(self, part=""):
platypus.PageBreak.__init__(self)
self._part = part
def setPart(self, part):
self._part = part
def getPart(self):
return self._part
class Preformatted(platypus.Preformatted):
def __init__(self, text, style, part="", bulletText = None, dedent=0):
platypus.Preformatted.__init__(self, text, style, bulletText = None, dedent=0)
self._part = part
def setPart(self, part):
self._part = part
def getPart(self):
return self._part
class FileDummy:
def __init__(self):
self._data = ""
self.name = "fileDummy"
def write(self, data):
self._data += data
def getData(self):
return self._data
def close(self):
pass
class CanvasA0(Canvas):
def __init__(self,filename,
pagesize=None,
bottomup = 1,
pageCompression=None,
encoding = None,
invariant = None,
verbosity=0):
Canvas.__init__(self, filename, pagesize=pagesize, bottomup=bottomup, pageCompression=pageCompression,
encoding=encoding, invariant=invariant, verbosity=verbosity)
self.scale(4.0, 4.0)
self.setPageSize(A0)
class CanvasA1(Canvas):
def __init__(self,filename,
pagesize=None,
bottomup = 1,
pageCompression=None,
encoding = None,
invariant = None,
verbosity=0):
Canvas.__init__(self, filename, pagesize=pagesize, bottomup=bottomup, pageCompression=pageCompression,
encoding=encoding, invariant=invariant, verbosity=verbosity)
self.scale(2.0 * math.sqrt(2.0), 2.0 * math.sqrt(2.0))
self.setPageSize(A1)
class CanvasA2(Canvas):
def __init__(self,filename,
pagesize=None,
bottomup = 1,
pageCompression=None,
encoding = None,
invariant = None,
verbosity=0):
Canvas.__init__(self, filename, pagesize=pagesize, bottomup=bottomup, pageCompression=pageCompression,
encoding=encoding, invariant=invariant, verbosity=verbosity)
self.scale(2.0, 2.0)
self.setPageSize(A2)
class CanvasA3(Canvas):
def __init__(self,filename,
pagesize=None,
bottomup = 1,
pageCompression=None,
encoding = None,
invariant = None,
verbosity=0):
Canvas.__init__(self, filename, pagesize=pagesize, bottomup=bottomup, pageCompression=pageCompression,
encoding=encoding, invariant=invariant, verbosity=verbosity)
self.scale(math.sqrt(2.0), math.sqrt(2.0))
self.setPageSize(A3)
class CanvasA5(Canvas):
def __init__(self,filename,
pagesize=None,
bottomup = 1,
pageCompression=None,
encoding = None,
invariant = None,
verbosity=0):
Canvas.__init__(self, filename, pagesize=pagesize, bottomup=bottomup, pageCompression=pageCompression,
encoding=encoding, invariant=invariant, verbosity=verbosity)
self.scale(1.0 / math.sqrt(2.0), 1.0 / math.sqrt(2.0))
self.setPageSize(A5)
pagesizeNameToCanvas = {'A4': Canvas,
'A0': CanvasA0,
'A1': CanvasA1,
'A2': CanvasA2,
'A3': CanvasA3,
'A5': CanvasA5,
'Letter': Canvas,
}
class PDFBase:
def __init__(self, doc=None, story=None, pagesize = 'A4', printLandscape=False, title=None):
if doc:
self._doc = doc
else:
#create a new document
            # As the constructor of SimpleDocTemplate can only take a filename or a file object,
            # to keep the PDF data out of a file, we use a dummy file object which saves the data in a string
self._fileDummy = FileDummy()
if printLandscape:
self._doc = SimpleDocTemplate(self._fileDummy, pagesize=landscape(PDFSizes().PDFpagesizes[pagesize]))
else:
self._doc = SimpleDocTemplate(self._fileDummy, pagesize=PDFSizes().PDFpagesizes[pagesize])
if title is not None:
self._doc.title = title
if story is not None:
self._story = story
else:
            # create a new story with a spacer which takes up the whole first page
            # the first page is then only drawn by the firstPage method
self._story = [PageBreak()]
if printLandscape:
self._PAGE_HEIGHT = landscape(PDFSizes().PDFpagesizes[pagesize])[1]
self._PAGE_WIDTH = landscape(PDFSizes().PDFpagesizes[pagesize])[0]
else:
self._PAGE_HEIGHT = PDFSizes().PDFpagesizes[pagesize][1]
self._PAGE_WIDTH = PDFSizes().PDFpagesizes[pagesize][0]
self._canv = Canvas
setTTFonts()
def firstPage(self, c, doc):
"""set the first page of the document
        This function is called by the doc.build method for the first page
"""
pass
def laterPages(self, c, doc):
"""set the layout of the page after the first
        This function is called by the doc.build method on each page after the first
"""
pass
def getBody(self, story=None):
"""add the content to the story
"""
pass
def getPDFBin(self):
#build the pdf in the fileDummy
self.getBody()
self._doc.build(self._story, onFirstPage=self.firstPage, onLaterPages=self.laterPages)
#return the data from the fileDummy
return self._fileDummy.getData()
def _drawWrappedString(self, c, text, font='Times-Bold', size=30, color=(0, 0, 0), align="center", width=None,
height=None, measurement=cm, lineSpacing=1, maximumWidth=None):
if maximumWidth is None:
maximumWidth = self._PAGE_WIDTH-1*cm
if width is None:
width=self._PAGE_WIDTH/2.0
if height is None:
height=self._PAGE_HEIGHT-10*measurement
draw = c.drawCentredString
if align == "right":
draw = c.drawRightString
elif align == "left":
draw = c.drawString
c.setFont(font, size)
c.setFillColorRGB(*color)
titleWords = text.split()
line=""
for word in titleWords:
lineAux = "%s %s"%(line, word)
lsize = c.stringWidth(lineAux, font, size)
if lsize < maximumWidth:
line = lineAux
else:
draw(width,height, line)
height -= lineSpacing*measurement
line = word
if line.strip() != "":
draw(width, height, line)
return height
def _drawLogo(self, c, drawTitle = True):
from indico.modules.events.util import create_event_logo_tmp_file
logo = self.event.logo
imagePath = ""
if logo:
imagePath = create_event_logo_tmp_file(self.event)
if imagePath:
try:
img = PILImage.open(imagePath)
width, height = img.size
# resize in case too big for page
if width > self._PAGE_WIDTH / 2:
ratio = float(height)/width
width = self._PAGE_WIDTH / 2
height = width * ratio
startHeight = self._PAGE_HEIGHT
if drawTitle:
startHeight = self._drawWrappedString(c, escape(self.event.title.encode('utf-8')),
height=self._PAGE_HEIGHT - inch)
# lower edge of the image
startHeight = startHeight - inch / 2 - height
# draw horizontally centered, with recalculated width and height
c.drawImage(imagePath, self._PAGE_WIDTH/2.0 - width/2, startHeight, width, height, mask="auto")
return startHeight
except IOError:
if drawTitle:
self._drawWrappedString(c, escape(self.event.title.encode('utf-8')),
height=self._PAGE_HEIGHT - inch)
return 0
def _doNothing(canvas, doc):
"Dummy callback for onPage"
pass
class DocTemplateWithTOC(SimpleDocTemplate):
def __init__(self, indexedFlowable, filename, firstPageNumber=1, include_toc=False, **kw):
"""toc is the TableOfContents object
        indexedFlowable is a dictionary with flowables as keys and a dictionary as value.
        The sub-dictionary has two keys:
        text: the text which will be printed in the table
        level: the level of the entry (modifying the indentation and the font)
"""
self.include_toc = include_toc
self._toc = []
self._tocStory = []
self._indexedFlowable = indexedFlowable
self._filename = filename
self._part = ""
self._firstPageNumber = firstPageNumber
SimpleDocTemplate.__init__(self, filename, **kw)
setTTFonts()
self._PAGE_HEIGHT = self.pagesize[1]
self._PAGE_WIDTH = self.pagesize[0]
def afterFlowable(self, flowable):
if flowable in self._indexedFlowable:
self._toc.append((self._indexedFlowable[flowable]["level"],self._indexedFlowable[flowable]["text"], self.page + self._firstPageNumber - 1))
try:
if flowable.getPart() != "":
self._part = flowable.getPart()
except:
pass
def handle_documentBegin(self):
self._part = ""
SimpleDocTemplate.handle_documentBegin(self)
def _prepareTOC(self):
headerStyle = ParagraphStyle({})
headerStyle.fontName = "LinuxLibertine-Bold"
headerStyle.fontSize = modifiedFontSize(18, 18)
headerStyle.leading = modifiedFontSize(22, 22)
headerStyle.alignment = TA_CENTER
entryStyle = ParagraphStyle({})
entryStyle.fontName = "LinuxLibertine"
entryStyle.spaceBefore = 8
self._tocStory.append(PageBreak())
if self.include_toc:
self._tocStory.append(Spacer(inch, 1*cm))
self._tocStory.append(Paragraph(_("Table of contents"), headerStyle))
self._tocStory.append(Spacer(inch, 2*cm))
for entry in self._toc:
indent = ((entry[0] - 1) * 50)
toc_entry = TableOfContentsEntry('<para leftIndent={}>{}</para>'.format(indent, entry[1]),
str(entry[2]), entryStyle)
self._tocStory.append(toc_entry)
def laterPages(self, c, doc):
c.saveState()
c.setFont('Times-Roman',9)
c.setFillColorRGB(0.5,0.5,0.5)
c.drawCentredString(self._PAGE_WIDTH / 2.0, 0.5 * cm, '%s ' % int_to_roman(doc.page - 1))
c.restoreState()
def multiBuild(self, story, filename=None, canvasMaker=Canvas, maxPasses=10, onFirstPage=_doNothing, onLaterPages=_doNothing):
self._calc() #in case we changed margins sizes etc
frameT = Frame(self.leftMargin, self.bottomMargin, self.width, self.height, id='normal')
self.addPageTemplates([PageTemplate(id='Later',frames=frameT, onPageEnd=onLaterPages,pagesize=self.pagesize)])
if onLaterPages is _doNothing and hasattr(self,'onLaterPages'):
self.pageTemplates[0].beforeDrawPage = self.onLaterPages
SimpleDocTemplate.multiBuild(self, story, maxPasses, canvasmaker=canvasMaker)
self._prepareTOC()
contentFile = self.filename
self.filename = FileDummy()
self.pageTemplates = []
self.addPageTemplates([PageTemplate(id='First',frames=frameT, onPage=onFirstPage,pagesize=self.pagesize)])
if onFirstPage is _doNothing and hasattr(self,'onFirstPage'):
self.pageTemplates[0].beforeDrawPage = self.onFirstPage
self.addPageTemplates([PageTemplate(id='Later',frames=frameT, onPageEnd=self.laterPages,pagesize=self.pagesize)])
if onLaterPages is _doNothing and hasattr(self,'onLaterPages'):
self.pageTemplates[1].beforeDrawPage = self.onLaterPages
SimpleDocTemplate.multiBuild(self, self._tocStory, maxPasses, canvasmaker=canvasMaker)
self.mergePDFs(self.filename, contentFile)
def mergePDFs(self, pdf1, pdf2):
from pyPdf import PdfFileWriter, PdfFileReader
import cStringIO
outputStream = cStringIO.StringIO()
pdf1Stream = cStringIO.StringIO()
pdf2Stream = cStringIO.StringIO()
pdf1Stream.write(pdf1.getData())
pdf2Stream.write(pdf2.getData())
output = PdfFileWriter()
background_pages = PdfFileReader(pdf1Stream)
foreground_pages = PdfFileReader(pdf2Stream)
for page in background_pages.pages:
output.addPage(page)
for page in foreground_pages.pages:
output.addPage(page)
output.write(outputStream)
pdf2._data = outputStream.getvalue()
outputStream.close()
def getCurrentPart(self):
return self._part
class PDFWithTOC(PDFBase):
"""
create a PDF with a Table of Contents
"""
def __init__(self, story=None, pagesize='A4', fontsize='normal', firstPageNumber=1, include_toc=True):
self._fontsize = fontsize
self._story = story
if story is None:
self._story = []
# without this blank spacer first abstract isn't displayed. why?
self._story.append(Spacer(inch, 0*cm))
self._indexedFlowable = {}
self._fileDummy = FileDummy()
self._doc = DocTemplateWithTOC(self._indexedFlowable, self._fileDummy, firstPageNumber=firstPageNumber,
pagesize=PDFSizes().PDFpagesizes[pagesize],
include_toc=include_toc)
self._PAGE_HEIGHT = PDFSizes().PDFpagesizes[pagesize][1]
self._PAGE_WIDTH = PDFSizes().PDFpagesizes[pagesize][0]
setTTFonts()
def _processTOCPage(self):
""" Generates page with table of contents.
Not used, because table of contents is generated automatically inside DocTemplateWithTOC class
"""
style1 = ParagraphStyle({})
style1.fontName = "Times-Bold"
style1.fontSize = modifiedFontSize(18, self._fontsize)
style1.leading = modifiedFontSize(22, self._fontsize)
style1.alignment = TA_CENTER
p = Paragraph(_("Table of contents"), style1)
self._story.append(Spacer(inch, 1*cm))
self._story.append(p)
self._story.append(Spacer(inch, 2*cm))
self._story.append(self._toc)
self._story.append(PageBreak())
def getBody(self, story=None):
"""add the content to the story
When you want to put a paragraph p in the toc, add it to the self._indexedFlowable as this:
self._indexedFlowable[p] = {"text":"my title", "level":1}
"""
if not story:
story = self._story
pass
def getPDFBin(self):
self.getBody()
self._doc.multiBuild(self._story, onFirstPage=self.firstPage, onLaterPages=self.laterPages)
return self._fileDummy.getData()
| mit |
wxkdesky/phantomjs | src/qt/qtwebkit/Tools/BuildSlaveSupport/wait-for-SVN-server.py | 118 | 2670 | #!/usr/bin/env python
#
# Copyright (C) 2006 John Pye
# Copyright (C) 2012 University of Szeged
#
# This script is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
from optparse import OptionParser
import exceptions
import sys
import time
import xml.dom.minidom
import os
import subprocess
def getLatestSVNRevision(SVNServer):
try:
p = subprocess.Popen(["svn", "log", "--non-interactive", "--verbose", "--xml", "--limit=1", SVNServer], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
response = p.communicate()[0]
doc = xml.dom.minidom.parseString(response)
el = doc.getElementsByTagName("logentry")[0]
return el.getAttribute("revision")
except xml.parsers.expat.ExpatError, e:
print "FAILED TO PARSE 'svn log' XML:"
print str(e)
print "----"
print "RECEIVED TEXT:"
print response
sys.exit(1)
def waitForSVNRevision(SVNServer, revision):
if not revision or not revision.isdigit():
latestRevision = int(getLatestSVNRevision(SVNServer))
print "Latest SVN revision on %s is r%d. Don't wait, because revision argument isn't a valid SVN revision." % (SVNServer, latestRevision)
return
revision = int(revision)
while True:
latestRevision = int(getLatestSVNRevision(SVNServer))
if latestRevision < revision:
print "Latest SVN revision on %s is r%d, but we are waiting for r%d. Sleeping for 5 seconds." % (SVNServer, latestRevision, revision)
time.sleep(5)
else:
print "Latest SVN revision on %s is r%d, which is newer or equal than r%d." % (SVNServer, latestRevision, revision)
break
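# Example invocation (added for illustration; the server URL is hypothetical):
#   python wait-for-SVN-server.py -s https://svn.example.org/repository -r 123456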
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-r", "--revision", dest="revision", help="SVN revision number")
parser.add_option("-s", "--svn-server", dest="SVNServer", help="SVN server")
options, args = parser.parse_args()
waitForSVNRevision(options.SVNServer, options.revision)
| bsd-3-clause |
Ben-Farnfield/dotdesk | action/install.py | 1 | 2665 |
"""
This module controls the install process for dotdesk.
install() -- runs the full install process
"""
import utils
import const
from interface import prompt
from interface import output
from model.dot_desktop_model import DotDesktopModel
from model.icon_model import IconModel
import sys
def install(args):
""" Runs full install process if the .desktop file doesn't exist. """
program = args["name"]
if utils.file_exists(DotDesktopModel.INSTALL_DIR + program + ".desktop"):
print program + ".desktop is already installed."
sys.exit()
desktop = DotDesktopModel(program)
icon = IconModel(program)
desktop, icon = _run_install_cli(desktop, icon)
output.line() # make things pretty
_run_desktop_install(desktop)
if icon.install_icon:
_run_icon_install(icon)
print "Bye!"
output.line()
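# Illustrative call sequence (added; the program name is an assumption):
#   install({"name": "myapp"}) exits early if myapp.desktop is already installed,
#   otherwise it prompts the user, writes the .desktop file and, if requested,
#   copies the icon via _run_icon_install().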
def _run_install_cli(desktop, icon):
"""
Runs the full install interface.
The user is asked for all the basic info a .desktop file requires. The
user is provided the option to install an icon or not.
"""
desktop.terminal = prompt.for_yes_no("Terminal app?")
desktop.tooltip = prompt.for_string("Enter tooltip")
desktop.exe = prompt.for_string("Enter execution command")
output.option_list("Select a category", const.CATEGORIES)
selection = prompt.for_selection("Make a selection", const.CATEGORIES_LEN)
desktop.category = const.CATEGORIES[selection]
icon.install_icon = prompt.for_yes_no("Install an icon?")
if icon.install_icon:
icon.icon_to_install = prompt.for_path("Enter full path to icon")
icon.icon_type = utils.file_type(icon.icon_to_install)
output.option_list("Select icon size", const.ICON_SIZES)
selection = prompt.for_selection("Make a selection",
const.ICON_SIZES_LEN)
icon.icon_size = const.ICON_SIZES[selection]
desktop.icon = str(icon)
return desktop, icon
def _run_desktop_install(desktop):
""" Installs the .desktop file in its install dir. """
path = DotDesktopModel.INSTALL_DIR + desktop.name + ".desktop"
desktop_contents = str(desktop)
utils.proc_file("write", path, desktop_contents,
desktop.name + ".desktop installed!",
"!!Issue installing .desktop!!")
def _run_icon_install(icon):
""" Installs the icon in its install dir. """
install_path = str(icon)
utils.proc_file("copy", icon.icon_to_install, install_path,
icon.icon_name + icon.icon_type + " installed!",
"!!Issue installing icon!!")
| mit |
NirantK/Twitter-Geographical-Sentiment-Analysis | IndiaHappiestState/freqAnalysis.py | 2 | 1173 | import collections
def freqAnalysis(key):
# for key in happyStates.keys():
words = []
tempWords = []
rawWords = happyStates[key].tweet.split(" ")
# print rawWords[0]
# x = False
impTweets = 0.0
for word in rawWords:
for thing in things_to_strip:
if thing in word:
word = word.replace(thing,"")
if word.lower() not in words_to_ignore:
words.append(word)
if word in wordsToNotice:
happyStates[key].isRailBudget = True
impTweets += 1
# print happyStates[key].tweet
# print words
counter = collections.Counter(words)
mostCommon = counter.most_common()
z = len(mostCommon)/100
j = 0.0
# for i in xrange(len(mostCommon)):
# print mostCommon[i][0], mostCommon[i][1]
# ele = mostCommon[i][0].lower()
# if ele=='i' or ele=='me':
# print mostCommon[i]
# j += mostCommon[i][1]
# print "Narcissist Value:",j/len(words)
# print "Happiness Value:",(happyStates[key].score)/(happyStates[key].count)
print impTweets
# print
# mostCommon = [x[0] for x in mostCommon]
| mit |
gunner08/cocosJS | JS/jsTest/tools/bindings-generator/clang/enumerations.py | 307 | 1077 | #===- enumerations.py - Python Enumerations ------------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
Clang Enumerations
==================
This module provides static definitions of enumerations that exist in libclang.
Enumerations are typically defined as a list of tuples. The exported values are
typically munged into other types or classes at module load time.
All enumerations are centrally defined in this file so they are all grouped
together and easier to audit. And, maybe even one day this file will be
automatically generated by scanning the libclang headers!
"""
# Maps to CXTokenKind. Note that libclang maintains a separate set of token
# enumerations from the C++ API.
TokenKinds = [
('PUNCTUATION', 0),
('KEYWORD', 1),
('IDENTIFIER', 2),
('LITERAL', 3),
('COMMENT', 4),
]
__all__ = ['TokenKinds']
| apache-2.0 |
jumping/Diamond | src/diamond/metric.py | 20 | 5762 | # coding=utf-8
import time
import re
import logging
from error import DiamondException
class Metric(object):
# This saves a significant amount of memory per object. This only matters
# due to the queue system that moves objects between processes and can end
# up storing a large number of objects in the queue waiting for the
# handlers to flush.
__slots__ = [
'path', 'value', 'raw_value', 'timestamp', 'precision',
'host', 'metric_type', 'ttl'
]
def __init__(self, path, value, raw_value=None, timestamp=None, precision=0,
host=None, metric_type='COUNTER', ttl=None):
"""
Create new instance of the Metric class
Takes:
            path=string: string that specifies the path of the metric
            value=[float|int]: the value to be submitted
            timestamp=[float|int]: the timestamp, in seconds since the epoch
                (as from time.time())
            precision=int: the precision to apply. Generally the default (0)
                should work fine.
"""
# Validate the path, value and metric_type submitted
if (None in [path, value] or metric_type not in ('COUNTER', 'GAUGE')):
raise DiamondException(("Invalid parameter when creating new "
"Metric with path: %r value: %r "
"metric_type: %r")
% (path, value, metric_type))
# If no timestamp was passed in, set it to the current time
if timestamp is None:
timestamp = int(time.time())
else:
# If the timestamp isn't an int, then make it one
if not isinstance(timestamp, int):
try:
timestamp = int(timestamp)
except ValueError as e:
raise DiamondException(("Invalid timestamp when "
"creating new Metric %r: %s")
% (path, e))
# The value needs to be a float or an int. If it is, great. If not,
# try to cast it to one of those.
if not isinstance(value, (int, float)):
try:
if precision == 0:
value = round(float(value))
else:
value = float(value)
except ValueError as e:
raise DiamondException(("Invalid value when creating new "
"Metric %r: %s") % (path, e))
self.path = path
self.value = value
self.raw_value = raw_value
self.timestamp = timestamp
self.precision = precision
self.host = host
self.metric_type = metric_type
self.ttl = ttl
def __repr__(self):
"""
Return the Metric as a string
"""
if not isinstance(self.precision, (int, long)):
log = logging.getLogger('diamond')
log.warn('Metric %s does not have a valid precision', self.path)
self.precision = 0
# Set the format string
fstring = "%%s %%0.%if %%i\n" % self.precision
        # Return formatted string
return fstring % (self.path, self.value, self.timestamp)
def __getstate__(self):
return dict(
(slot, getattr(self, slot))
for slot in self.__slots__
if hasattr(self, slot)
)
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
@classmethod
def parse(cls, string):
"""
Parse a string and create a metric
"""
match = re.match(r'^(?P<name>[A-Za-z0-9\.\-_]+)\s+' +
'(?P<value>[0-9\.]+)\s+' +
'(?P<timestamp>[0-9\.]+)(\n?)$',
string)
try:
groups = match.groupdict()
# TODO: get precision from value string
return Metric(groups['name'],
groups['value'],
float(groups['timestamp']))
except:
raise DiamondException(
"Metric could not be parsed from string: %s." % string)
def getPathPrefix(self):
"""
Returns the path prefix path
servers.host.cpu.total.idle
return "servers"
"""
# If we don't have a host name, assume it's just the first part of the
# metric path
if self.host is None:
return self.path.split('.')[0]
offset = self.path.index(self.host) - 1
return self.path[0:offset]
def getCollectorPath(self):
"""
Returns collector path
servers.host.cpu.total.idle
return "cpu"
"""
# If we don't have a host name, assume it's just the third part of the
# metric path
if self.host is None:
return self.path.split('.')[2]
offset = self.path.index(self.host)
offset += len(self.host) + 1
endoffset = self.path.index('.', offset)
return self.path[offset:endoffset]
def getMetricPath(self):
"""
Returns the metric path after the collector name
servers.host.cpu.total.idle
return "total.idle"
"""
# If we don't have a host name, assume it's just the fourth+ part of the
# metric path
if self.host is None:
path = self.path.split('.')[3:]
return '.'.join(path)
prefix = '.'.join([self.getPathPrefix(), self.host,
self.getCollectorPath()])
offset = len(prefix) + 1
return self.path[offset:]
| mit |
gmist/alice-box | main/config.py | 1 | 2447 | # coding: utf-8
import os
PRODUCTION = os.environ.get('SERVER_SOFTWARE', '').startswith('Google App Eng')
DEVELOPMENT = not PRODUCTION
DEBUG = DEVELOPMENT
try:
# This part is surrounded in try/except because the config.py file is
# also used in the run.py script which is used to compile/minify the client
# side files (*.less, *.coffee, *.js) and is not aware of the GAE
from datetime import datetime
from google.appengine.api import app_identity
CURRENT_VERSION_ID = os.environ.get('CURRENT_VERSION_ID')
CURRENT_VERSION_NAME = CURRENT_VERSION_ID.split('.')[0]
CURRENT_VERSION_TIMESTAMP = long(CURRENT_VERSION_ID.split('.')[1]) >> 28
if DEVELOPMENT:
import calendar
CURRENT_VERSION_TIMESTAMP = calendar.timegm(datetime.utcnow().timetuple())
CURRENT_VERSION_DATE = datetime.utcfromtimestamp(CURRENT_VERSION_TIMESTAMP)
APPLICATION_ID = app_identity.get_application_id()
import model
CONFIG_DB = model.Config.get_master_db()
SECRET_KEY = CONFIG_DB.flask_secret_key.encode('ascii')
except:
pass
DEFAULT_DB_LIMIT = 64
###############################################################################
# Client modules, also used by the run.py script.
###############################################################################
STYLES = [
'src/style/style.less',
]
SCRIPTS = [
('libs', [
'ext/js/jquery/jquery.js',
'ext/js/momentjs/moment.js',
'ext/js/nprogress/nprogress.js',
'ext/js/bootstrap/alert.js',
'ext/js/bootstrap/button.js',
'ext/js/bootstrap/transition.js',
'ext/js/bootstrap/collapse.js',
'ext/js/bootstrap/dropdown.js',
'ext/js/bootstrap/tooltip.js',
'ext/js/bootstrap/tab.js',
'src/lib/markdown/markdown.js',
'src/lib/markdown/to-markdown.js',
'src/lib/markdown/bootstrap-markdown.js',
]),
('scripts', [
'src/script/common/service.coffee',
'src/script/common/upload.coffee',
'src/script/common/util.coffee',
'src/script/site/app.coffee',
'src/script/site/admin.coffee',
'src/script/site/admin-box.coffee',
'src/script/site/admin-toy.coffee',
'src/script/site/pretty-file.coffee',
'src/script/site/profile.coffee',
'src/script/site/resource.coffee',
'src/script/site/signin.coffee',
'src/script/site/user.coffee',
'src/script/site/welcome.coffee',
]),
]
| mit |
AllenAkhaumere/DeepProtein | global_utils.py | 1 | 14993 | """Python utilities required Keras."""
from __future__ import absolute_import
import numpy as np
import random
import pandas as pd
import time
import sys
import six
import marshal
import types as python_types
import inspect
import tensorflow as tf
from keras.callbacks import Callback
_GLOBAL_CUSTOM_OBJECTS = {}
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
# Example
Consider a custom object `MyObject`
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
# Example
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
# Arguments
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
# Returns
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
# Example
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
# Returns
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
def serialize_keras_object(instance):
if instance is None:
return None
if hasattr(instance, 'get_config'):
return {
'class_name': instance.__class__.__name__,
'config': instance.get_config()
}
if hasattr(instance, '__name__'):
return instance.__name__
else:
raise ValueError('Cannot serialize', instance)
def deserialize_keras_object(identifier, module_objects=None,
custom_objects=None,
printable_module_name='object'):
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
if 'class_name' not in config or 'config' not in config:
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
if custom_objects and class_name in custom_objects:
cls = custom_objects[class_name]
elif class_name in _GLOBAL_CUSTOM_OBJECTS:
cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
else:
module_objects = module_objects or {}
cls = module_objects.get(class_name)
if cls is None:
raise ValueError('Unknown ' + printable_module_name +
': ' + class_name)
if hasattr(cls, 'from_config'):
arg_spec = inspect.getargspec(cls.from_config)
if 'custom_objects' in arg_spec.args:
custom_objects = custom_objects or {}
return cls.from_config(config['config'],
custom_objects=dict(list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
return cls.from_config(config['config'])
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
return cls(**config['config'])
elif isinstance(identifier, six.string_types):
function_name = identifier
if custom_objects and function_name in custom_objects:
fn = custom_objects.get(function_name)
elif function_name in _GLOBAL_CUSTOM_OBJECTS:
fn = _GLOBAL_CUSTOM_OBJECTS[function_name]
else:
fn = module_objects.get(function_name)
if fn is None:
raise ValueError('Unknown ' + printable_module_name +
':' + function_name)
return fn
else:
raise ValueError('Could not interpret serialized ' +
printable_module_name + ': ' + identifier)
def func_dump(func):
"""Serializes a user defined function.
# Arguments
func: the function to serialize.
# Returns
A tuple `(code, defaults, closure)`.
"""
code = marshal.dumps(func.__code__).decode('raw_unicode_escape')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
# Arguments
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
# Returns
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
code = marshal.loads(code.encode('raw_unicode_escape'))
if globs is None:
globs = globals()
return python_types.FunctionType(code, globs,
name=code.co_name,
argdefs=defaults,
closure=closure)
class Progbar(object):
"""Displays a progress bar.
# Arguments
target: Total number of steps expected.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1, interval=0.05):
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.last_update = 0
self.interval = interval
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
def update(self, current, values=None, force=False):
"""Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
force: Whether to force visual progress update.
"""
values = values or []
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far),
current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
if not force and (now - self.last_update) < self.interval:
return
prev_total_width = self.total_width
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit * (self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
if isinstance(self.sum_values[k], list):
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self.sum_values[k]
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width - self.total_width) * ' ')
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write('\n')
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
sys.stdout.write(info + "\n")
self.last_update = now
def add(self, n, values=None):
self.update(self.seen_so_far + n, values)
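# Illustrative usage of Progbar (added; the loop and metric name are assumptions):
#   bar = Progbar(target=100)
#   for step in range(100):
#       bar.update(step + 1, values=[("loss", 0.1)])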
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
class LossRatioLogger(Callback):
""""Override function from Callback class that logs the loss and val_loss ratio"""
def on_epoch_end(self, epoch, logs = {}):
ratio = logs.get('loss')/logs.get('val_loss')
print("Ratio(l/v_l) = {:2.2f}".format(ratio))
def protein_seq_2oneHot(sequence):
"""
Return a binary one-hot vector
"""
one_digit = {'A': 0, 'C': 1, 'D': 2, 'E': 3, 'F': 4, 'G': 5, \
'H': 6, 'I': 7, 'K': 8, 'L': 9, 'M': 10, 'N': 11, 'P': 12, \
'Q': 13, 'R': 14, 'S': 15, 'T': 16, 'V': 17, 'W': 18, 'Y': 19}
assert len(sequence) >= 1
encoded = []
for letter in sequence:
tmp = np.zeros(20)
tmp[one_digit[letter]] = 1
encoded.append(tmp)
assert len(encoded) == len(sequence)
encoded = np.asarray(encoded)
return list(encoded.flatten())
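# Illustrative note (added): for a sequence such as 'ACD' the function returns a
# flat list of 3 * 20 = 60 floats, one 20-wide one-hot block per residue.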
def seq(sequence):
Alph = {'A': 1, 'a': 1,
'B': 2, 'b': 2,
'C': 3, 'c': 3,
'D': 4, 'd': 4,
'E': 5, 'e': 5,
'F': 6, 'f': 6,
'G': 7, 'g': 7,
'H': 8, 'h': 8,
'I': 9, 'i': 9,
'J': 10, 'j': 10,
'K': 11, 'k': 11,
'L': 12, 'l': 12,
'M': 13, 'm': 13,
'N': 14, 'n': 14,
'O': 15, 'o': 15,
'P': 16, 'p': 16,
'Q': 17, 'q': 17,
'R': 18, 'r': 18,
'S': 19, 's': 19,
'T': 20, 't': 20,
'U': 21, 'u': 21,
'V': 22, 'v': 22,
'W': 23, 'w': 23,
'X': 24, 'x': 24,
'Y': 25, 'y': 25,
'Z': 26, 'z': 26
}
dataset = []
for d in sequence:
d1 = []
for letters in d:
d1.append(np.float32(Alph[letters]))
for j in range(20 - len(d1)):
d1.append(np.float32(0))
dataset.append(d1)
return list(dataset)
def rmsd(y, prediction):
""""Compute Root Mean Square Defference"""
return tf.sqrt(tf.reduce_mean(tf.pow(prediction - y, 2)))
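# Illustrative sketch (added; assumes the TF 1.x graph-style execution used above):
#   loss = rmsd(y_placeholder, prediction_tensor)
#   value = session.run(loss, feed_dict={...})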
def chi2(exp, obs):
"""
Compute CHI^2 statistics of non-zero expected elements
"""
zero = tf.constant(0, dtype=tf.float32)
mask = tf.not_equal(exp, zero)
def masking(tensor, mask):
return tf.boolean_mask(tensor, mask)
stat = tf.reduce_sum(
tf.div(
tf.pow(
tf.subtract(masking(obs, mask), masking(exp, mask)),
2),
masking(exp, mask)),
name="chi2_statistics")
return stat
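# Worked example (added): only bins with a non-zero expected count contribute,
# e.g. exp = [4., 0., 6.] and obs = [5., 1., 3.] give (1/4) + (9/6) = 1.75.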
def generate_weights(array):
results = []
for row in array:
weigth = np.zeros(len(row))
mask = row != 0.0
weigth[mask] = 1.0
results.append(weigth)
weights = np.array(results)
return weights
def split_data(data, prob):
"""split data into fractions [prob, 1 - prob]"""
results = [], []
for row in data:
results[0 if random.random() < prob else 1].append(row)
return results
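# Illustrative only (added): split_data(list(range(10)), 0.8) returns two lists of
# roughly 8 and 2 rows; the split is random, so the exact sizes vary per run.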
def split_data_to_train_test(data, label, test_portion):
dataset = list(zip(data, label))
# pair corresponding values
train, test = split_data(data=dataset, prob= 1 - test_portion) # split the dataset of pairs
x_train, y_train = list(zip(*train)) # magical un-zip trick
x_test, y_test = list(zip(*test))
#convert list to numpy array
x_train = np.asarray(x_train)
y_train = np.asarray(y_train)
x_test = np.asarray(x_test)
y_test = np.asarray(y_test)
return x_train, x_test, y_train, y_test
def load_data_seq_csv(filepath, x_descrip='', y_descrip=''):
dataframe = pd.read_csv(filepath)
print("Data shape: ", dataframe.shape)
dataset = dataframe
X, Y = dataset[x_descrip], dataset[y_descrip]
X, Y = map(lambda element: np.array(list(element)), X), map(lambda element: np.array(element), Y)
return list(X), list(Y) | mit |
bashrc/zeronet-debian | src/src/lib/rsa/prime.py | 194 | 3905 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Numerical functions related to primes.
Implementation based on the book Algorithm Design by Michael T. Goodrich and
Roberto Tamassia, 2002.
'''
__all__ = [ 'getprime', 'are_relatively_prime']
import rsa.randnum
def gcd(p, q):
'''Returns the greatest common divisor of p and q
>>> gcd(48, 180)
12
'''
while q != 0:
if p < q: (p,q) = (q,p)
(p,q) = (q, p % q)
return p
def jacobi(a, b):
'''Calculates the value of the Jacobi symbol (a/b) where both a and b are
positive integers, and b is odd
:returns: -1, 0 or 1
'''
assert a > 0
assert b > 0
if a == 0: return 0
result = 1
while a > 1:
if a & 1:
if ((a-1)*(b-1) >> 2) & 1:
result = -result
a, b = b % a, a
else:
if (((b * b) - 1) >> 3) & 1:
result = -result
a >>= 1
if a == 0: return 0
return result
def jacobi_witness(x, n):
'''Returns False if n is an Euler pseudo-prime with base x, and
True otherwise.
'''
j = jacobi(x, n) % n
f = pow(x, n >> 1, n)
if j == f: return False
return True
def randomized_primality_testing(n, k):
'''Calculates whether n is composite (which is always correct) or
prime (which is incorrect with error probability 2**-k)
Returns False if the number is composite, and True if it's
probably prime.
'''
    # 50% of Jacobi-witnesses can report compositeness of non-prime numbers
# The implemented algorithm using the Jacobi witness function has error
# probability q <= 0.5, according to Goodrich et. al
#
# q = 0.5
# t = int(math.ceil(k / log(1 / q, 2)))
# So t = k / log(2, 2) = k / 1 = k
# this means we can use range(k) rather than range(t)
for _ in range(k):
x = rsa.randnum.randint(n-1)
if jacobi_witness(x, n): return False
return True
def is_prime(number):
'''Returns True if the number is prime, and False otherwise.
>>> is_prime(42)
False
>>> is_prime(41)
True
'''
return randomized_primality_testing(number, 6)
def getprime(nbits):
'''Returns a prime number that can be stored in 'nbits' bits.
>>> p = getprime(128)
>>> is_prime(p-1)
False
>>> is_prime(p)
True
>>> is_prime(p+1)
False
>>> from rsa import common
>>> common.bit_size(p) == 128
True
'''
while True:
integer = rsa.randnum.read_random_int(nbits)
# Make sure it's odd
integer |= 1
# Test for primeness
if is_prime(integer):
return integer
# Retry if not prime
def are_relatively_prime(a, b):
'''Returns True if a and b are relatively prime, and False if they
are not.
>>> are_relatively_prime(2, 3)
1
>>> are_relatively_prime(2, 4)
0
'''
d = gcd(a, b)
return (d == 1)
if __name__ == '__main__':
print('Running doctests 1000x or until failure')
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count and count % 100 == 0:
print('%i times' % count)
print('Doctests done')
| gpl-2.0 |
rfhk/awo-custom | stock_account_adjust/stock_account.py | 2 | 4816 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) Rooms For (Hong Kong) Limited T/A OSCG (<http://www.openerp-asia.net>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID, api
import logging
_logger = logging.getLogger(__name__)
class stock_quant(osv.osv):
_inherit = "stock.quant"
def _account_entry_move(self, cr, uid, quants, move, context=None):
"""
Accounting Valuation Entries
        quants: browse record list of Quants to create accounting valuation entries for. Non-empty, and all quants are supposed to have the same location id (they already moved in)
move: Move to use. browse record
"""
if context is None:
context = {}
location_obj = self.pool.get('stock.location')
location_from = move.location_id
location_to = quants[0].location_id
company_from = location_obj._location_owner(cr, uid, location_from, context=context)
company_to = location_obj._location_owner(cr, uid, location_to, context=context)
if move.product_id.valuation != 'real_time':
return False
for q in quants:
# if q.owner_id:
# #if the quant isn't owned by the company, we don't make any valuation entry
# return False
# if q.owner_id.name <> q.owner_id.company_id.name:
# if q.owner_id and q.owner_id != \
# self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.partner_id:
if q.owner_id and q.owner_id != q.company_id.partner_id:
#if the quant isn't owned by the company, we don't make any valuation entry
return False
if q.qty <= 0:
#we don't make any stock valuation for negative quants because the valuation is already made for the counterpart.
#At that time the valuation will be made at the product cost price and afterward there will be new accounting entries
#to make the adjustments when we know the real cost price.
return False
#in case of routes making the link between several warehouse of the same company, the transit location belongs to this company, so we don't need to create accounting entries
# Create Journal Entry for products arriving in the company
if company_to and (move.location_id.usage not in ('internal', 'transit') and move.location_dest_id.usage == 'internal' or company_from != company_to):
ctx = context.copy()
ctx['force_company'] = company_to.id
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
if location_from and location_from.usage == 'customer':
#goods returned from customer
self._create_account_move_line(cr, uid, quants, move, acc_dest, acc_valuation, journal_id, context=ctx)
else:
self._create_account_move_line(cr, uid, quants, move, acc_src, acc_valuation, journal_id, context=ctx)
# Create Journal Entry for products leaving the company
if company_from and (move.location_id.usage == 'internal' and move.location_dest_id.usage not in ('internal', 'transit') or company_from != company_to):
ctx = context.copy()
ctx['force_company'] = company_from.id
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
if location_to and location_to.usage == 'supplier':
#goods returned to supplier
self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_src, journal_id, context=ctx)
else:
self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_dest, journal_id, context=ctx)
| lgpl-3.0 |
jhseu/tensorflow | tensorflow/python/tpu/profiler/pip_package/setup.py | 23 | 2402 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Cloud TPU profiler package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup
from cloud_tpu_profiler.version import __version__
CONSOLE_SCRIPTS = [
'capture_tpu_profile=cloud_tpu_profiler.capture_tpu_profile:run_main',
]
setup(
name='cloud_tpu_profiler',
version=__version__.replace('-', ''),
description='Trace and profile Cloud TPU performance',
    long_description='Tools for capturing TPU profiles',
url='https://www.tensorflow.org/tfrc/',
author='Google Inc.',
author_email='[email protected]',
packages=['cloud_tpu_profiler'],
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow performance tpu',
)
| apache-2.0 |
destijl/grr | grr/lib/maintenance_utils.py | 1 | 14437 | #!/usr/bin/env python
"""This file contains utility classes related to maintenance used by GRR."""
import getpass
import hashlib
import os
import StringIO
import sys
import time
import zipfile
import logging
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import events
from grr.lib import utils
from grr.lib.aff4_objects import collects
from grr.lib.aff4_objects import users
from grr.lib.builders import signing
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
DIGEST_ALGORITHM = hashlib.sha256 # pylint: disable=invalid-name
DIGEST_ALGORITHM_STR = "sha256"
SUPPORTED_PLATFORMS = ["windows", "linux", "darwin"]
SUPPORTED_ARCHITECTURES = ["i386", "amd64"]
class Error(Exception):
"""Base error class."""
pass
class UserError(Error):
pass
def EPrint(message):
sys.stderr.write("%s\n" % message)
def UploadSignedConfigBlob(content,
aff4_path,
client_context=None,
limit=None,
token=None):
"""Upload a signed blob into the datastore.
Args:
content: File content to upload.
aff4_path: aff4 path to upload to.
client_context: The configuration contexts to use.
limit: The maximum size of the chunk to use.
token: A security token.
Raises:
IOError: On failure to write.
"""
if limit is None:
limit = config_lib.CONFIG["Datastore.maximum_blob_size"]
# Get the values of these parameters which apply to the client running on the
# target platform.
if client_context is None:
# Default to the windows client.
client_context = ["Platform:Windows", "Client Context"]
config_lib.CONFIG.Validate(
parameters="PrivateKeys.executable_signing_private_key")
sig_key = config_lib.CONFIG.Get("PrivateKeys.executable_signing_private_key",
context=client_context)
ver_key = config_lib.CONFIG.Get("Client.executable_signing_public_key",
context=client_context)
urn = collects.GRRSignedBlob.NewFromContent(
content,
aff4_path,
chunk_size=limit,
token=token,
private_key=sig_key,
public_key=ver_key)
logging.info("Uploaded to %s", urn)
def GetConfigBinaryPathType(aff4_path):
"""Take an aff4_path and return type or None.
Args:
aff4_path: An RDFURN containing the path to the binary.
Returns:
None if the path is not supported for binary upload, otherwise a type.
"""
if not aff4_path.Path().startswith("/config"):
return
components = aff4_path.RelativeName("aff4:/config").split("/")
if components[0] == "executables" and components[1] in SUPPORTED_PLATFORMS:
return "GRRSignedBlob"
elif components[0] == "python_hacks":
return "GRRSignedBlob"
else:
return
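# Illustrative note (added): with the rules above, for example
#   aff4:/config/executables/windows/installers/installer.exe -> "GRRSignedBlob"
#   aff4:/config/python_hacks/some_hack.py                    -> "GRRSignedBlob"
# while any path outside /config, or with an unsupported platform, yields None.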
def CreateBinaryConfigPaths(token=None):
"""Create the paths required for binary configs."""
required_urns = set()
try:
# We weren't already initialized, create all directories we will need.
for platform in SUPPORTED_PLATFORMS:
required_urns.add("aff4:/config/executables/%s/agentupdates" % platform)
required_urns.add("aff4:/config/executables/%s/installers" % platform)
existing_urns = [
x["urn"] for x in aff4.FACTORY.Stat(
list(required_urns), token=token)
]
missing_urns = required_urns - set(existing_urns)
# One by one is not optimal but we have to do it only once per urn.
for urn in missing_urns:
aff4.FACTORY.Create(urn, aff4.AFF4Volume, token=token).Flush()
except access_control.UnauthorizedAccess:
logging.info("User is not admin, cannot check configuration tree.")
return
def _SignWindowsComponent(component, output_filename):
print "Enter passphrase for code signing cert:"
passwd = getpass.getpass()
cert = config_lib.CONFIG.Get("ClientBuilder.windows_signing_cert")
key = config_lib.CONFIG.Get("ClientBuilder.windows_signing_key")
app_name = config_lib.CONFIG.Get(
"ClientBuilder.windows_signing_application_name")
signer = signing.WindowsCodeSigner(cert, key, passwd, app_name)
with utils.TempDirectory() as temp_dir:
zip_file = zipfile.ZipFile(StringIO.StringIO(component.raw_data))
zip_file.extractall(temp_dir)
new_data = StringIO.StringIO()
new_zipfile = zipfile.ZipFile(
new_data, mode="w", compression=zipfile.ZIP_DEFLATED)
for root, _, files in os.walk(temp_dir):
for basename in files:
basename = basename.lstrip("\\/")
filename = os.path.join(root, basename)
# The relative filename to the root of the zip file.
relative_filename = filename[len(temp_dir):].lstrip("/")
extension = os.path.splitext(filename)[1].lower()
if extension in [".sys", ".exe", ".dll", ".pyd"]:
out_filename = filename + ".signed"
signer.SignFile(filename, out_filename=out_filename)
new_zipfile.write(out_filename, arcname=relative_filename)
else:
new_zipfile.write(filename, arcname=relative_filename)
# Flush the Zip file.
new_zipfile.close()
component.raw_data = new_data.getvalue()
with open(output_filename, "wb") as out_fd:
out_fd.write(component.SerializeToString())
def SignComponentContent(component_filename, output_filename):
"""Some OSs require the contents of a component to be signed as well.
Specifically this action unzips the component and authenticode signs all
binaries. The component is then repacked.
Args:
component_filename: The filename of the component.
output_filename: We write the new signed component here.
Raises:
RuntimeError: If called for any other OS than windows.
"""
component = rdf_client.ClientComponent.FromSerializedString(
open(component_filename, "rb").read())
EPrint("Opened component %s." % component.summary.name)
if component.build_system.system == "Windows":
_SignWindowsComponent(component, output_filename)
return
raise RuntimeError("Component signing is not implemented for OS %s." %
component.build_system.system)
def SignComponent(component_filename, overwrite=False, token=None):
"""Sign and upload the component to the data store."""
EPrint("Signing and uploading component %s" % component_filename)
serialized_component = open(component_filename, "rb").read()
component = rdf_client.ClientComponent.FromSerializedString(
serialized_component)
EPrint("Opened component %s." % component.summary.name)
client_context = [
"Platform:%s" % component.build_system.system.title(),
"Arch:%s" % component.build_system.arch
]
sig_key = config_lib.CONFIG.Get("PrivateKeys.executable_signing_private_key",
context=client_context)
ver_key = config_lib.CONFIG.Get("Client.executable_signing_public_key",
context=client_context)
# For each platform specific component, we have a component summary object
# which contains high level information in common to all components of this
# specific version.
component_urn = config_lib.CONFIG.Get("Config.aff4_root").Add(
"components").Add("%s_%s" %
(component.summary.name, component.summary.version))
component_fd = aff4.FACTORY.Create(
component_urn, collects.ComponentObject, mode="rw", token=token)
component_summary = component_fd.Get(component_fd.Schema.COMPONENT)
if overwrite or component_summary is None:
EPrint("Storing component summary at %s" % component_urn)
component_summary = component.summary
component_summary.seed = "%x%x" % (time.time(), utils.PRNG.GetULong())
component_summary.url = (
config_lib.CONFIG.Get("Client.component_url_stem",
context=client_context) + component_summary.seed)
component_fd.Set(component_fd.Schema.COMPONENT, component_summary)
component_fd.Close()
else:
EPrint("Using seed from stored component summary at %s" % component_urn)
component.summary.url = component_summary.url
component.summary.seed = component_summary.seed
# Sign the component, encrypt it and store it at the static aff4 location.
signed_component = rdf_crypto.SignedBlob()
signed_component.Sign(component.SerializeToString(), sig_key, ver_key)
aff4_urn = config_lib.CONFIG.Get(
"Client.component_aff4_stem", context=client_context).Add(
component.summary.seed).Add(component.build_system.signature())
EPrint("Storing signed component at %s" % aff4_urn)
with aff4.FACTORY.Create(aff4_urn, aff4.AFF4MemoryStream, token=token) as fd:
fd.Write(
component_summary.cipher.Encrypt(signed_component.SerializeToString()))
return component
def SignAllComponents(overwrite=False, token=None):
components_dir = config_lib.CONFIG["ClientBuilder.components_dir"]
for root, _, files in os.walk(components_dir):
for f in files:
if os.path.splitext(f)[1] != ".bin":
continue
component_filename = os.path.join(root, f)
try:
SignComponent(component_filename, overwrite=overwrite, token=token)
except Exception as e: # pylint: disable=broad-except
EPrint("Could not sign component %s: %s" % (component_filename, e))
def ListComponents(token=None):
component_root = aff4.FACTORY.Open("aff4:/config/components", token=token)
for component in component_root.OpenChildren():
if not isinstance(component, collects.ComponentObject):
continue
desc = component.Get(component.Schema.COMPONENT)
if not desc:
continue
EPrint("* Component %s (version %s)" % (desc.name, desc.version))
versions = []
base_urn = "aff4:/web%s" % desc.url
for urn, _, _ in data_store.DB.ScanAttribute(
base_urn, "aff4:type", token=token):
versions.append(urn.split("/")[-1])
if not versions:
EPrint("No platform signatures available.")
else:
EPrint("Available platform signatures:")
for v in sorted(versions):
EPrint("- %s" % v)
def ShowUser(username, token=None):
"""Implementation of the show_user command."""
if username is None:
fd = aff4.FACTORY.Open("aff4:/users", token=token)
for user in fd.OpenChildren():
if isinstance(user, users.GRRUser):
EPrint(user.Describe())
else:
user = aff4.FACTORY.Open("aff4:/users/%s" % username, token=token)
if isinstance(user, users.GRRUser):
EPrint(user.Describe())
else:
EPrint("User %s not found" % username)
def AddUser(username, password=None, labels=None, token=None):
"""Implementation of the add_user command."""
token = data_store.GetDefaultToken(token)
user_urn = "aff4:/users/%s" % username
try:
if aff4.FACTORY.Open(user_urn, users.GRRUser, token=token):
raise UserError("Cannot add user %s: User already exists." % username)
except aff4.InstantiationError:
pass
fd = aff4.FACTORY.Create(user_urn, users.GRRUser, mode="rw", token=token)
# Note this accepts blank passwords as valid.
if password is None:
password = getpass.getpass(prompt="Please enter password for user '%s': " %
username)
fd.SetPassword(password)
if labels:
fd.AddLabels(*set(labels), owner="GRR")
fd.Close()
EPrint("Added user %s." % username)
events.Events.PublishEvent(
"Audit",
events.AuditEvent(
user=token.username, action="USER_ADD", urn=user_urn),
token=token)
def UpdateUser(username,
password,
add_labels=None,
delete_labels=None,
token=None):
"""Implementation of the update_user command."""
token = data_store.GetDefaultToken(token)
user_urn = "aff4:/users/%s" % username
try:
fd = aff4.FACTORY.Open(user_urn, users.GRRUser, mode="rw", token=token)
except aff4.InstantiationError:
raise UserError("User %s does not exist." % username)
# Note this accepts blank passwords as valid.
if password:
if not isinstance(password, basestring):
password = getpass.getpass(
prompt="Please enter password for user '%s': " % username)
fd.SetPassword(password)
# Use sets to dedup input.
current_labels = set()
# Build a list of existing labels.
for label in fd.GetLabels():
current_labels.add(label.name)
# Build a list of labels to be added.
expanded_add_labels = set()
if add_labels:
for label in add_labels:
# Split up any space or comma separated labels in the list.
labels = label.split(",")
expanded_add_labels.update(labels)
# Build a list of labels to be removed.
expanded_delete_labels = set()
if delete_labels:
for label in delete_labels:
# Split up any space or comma separated labels in the list.
labels = label.split(",")
expanded_delete_labels.update(labels)
# Set subtraction to remove labels being added and deleted at the same time.
clean_add_labels = expanded_add_labels - expanded_delete_labels
clean_del_labels = expanded_delete_labels - expanded_add_labels
# Create final list using difference to only add new labels.
final_add_labels = clean_add_labels - current_labels
# Create final list using intersection to only remove existing labels.
final_del_labels = clean_del_labels & current_labels
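  # Worked example (added): with current labels {"a", "b"},
  # add_labels=["b,c", "d"] and delete_labels=["a", "d"]:
  #   expanded_add_labels    == {"b", "c", "d"}
  #   expanded_delete_labels == {"a", "d"}
  #   clean_add_labels       == {"b", "c"}   (d is both added and deleted)
  #   final_add_labels       == {"c"}        (b is already present)
  #   final_del_labels       == {"a"}        (only existing labels are removed)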
if final_add_labels:
fd.AddLabels(*final_add_labels, owner="GRR")
if final_del_labels:
fd.RemoveLabels(*final_del_labels, owner="GRR")
fd.Close()
EPrint("Updated user %s" % username)
ShowUser(username, token=token)
events.Events.PublishEvent(
"Audit",
events.AuditEvent(
user=token.username, action="USER_UPDATE", urn=user_urn),
token=token)
def DeleteUser(username, token=None):
"""Deletes an existing user."""
token = data_store.GetDefaultToken(token)
user_urn = "aff4:/users/%s" % username
try:
aff4.FACTORY.Open(user_urn, users.GRRUser, token=token)
except aff4.InstantiationError:
EPrint("User %s not found." % username)
return
aff4.FACTORY.Delete(user_urn, token=token)
EPrint("User %s has been deleted." % username)
events.Events.PublishEvent(
"Audit",
events.AuditEvent(
user=token.username, action="USER_DELETE", urn=user_urn),
token=token)
| apache-2.0 |
turingmachine/aubio | tests/python/src/spectral/fft.py | 4 | 5844 | from template import aubio_unit_template
from localaubio import *
import math
buf_size = 1024
channels = 4
class fft_unit(aubio_unit_template):
def setUp(self):
self.o = new_aubio_fft(buf_size, channels)
def tearDown(self):
del_aubio_fft(self.o)
def test_create(self):
""" test creation and deletion of fft object """
pass
def test_do_zeroes(self):
""" test aubio_fft_do on zeroes """
input = new_fvec(buf_size, channels)
fftgrain = new_cvec(buf_size, channels)
for index in range(buf_size):
for channel in range(channels):
self.assertCloseEnough(0., fvec_read_sample(input, channel, index))
aubio_fft_do(self.o, input, fftgrain)
for index in range(buf_size/2+1):
for channel in range(channels):
self.assertCloseEnough(0., cvec_read_norm(fftgrain, channel, index))
for index in range(buf_size/2+1):
for channel in range(channels):
self.assertCloseEnough(0., cvec_read_phas(fftgrain, channel, index))
del fftgrain
del input
def test_rdo_zeroes(self):
""" test aubio_fft_rdo on zeroes """
fftgrain = new_cvec(buf_size, channels)
output = new_fvec(buf_size, channels)
aubio_fft_rdo(self.o, fftgrain, output)
# check output
for index in range(buf_size):
for channel in range(channels):
self.assertEqual(0., fvec_read_sample(output, channel, index))
del fftgrain
del output
def test_do_impulse(self):
""" test aubio_fft_do with an impulse on one channel """
input = new_fvec(buf_size, channels)
fftgrain = new_cvec(buf_size, channels)
# write impulse in channel 0, sample 0.
some_constant = 0.3412432456
fvec_write_sample(input, some_constant, 0, 0)
aubio_fft_do(self.o, input, fftgrain)
# check norm
for index in range(buf_size/2+1):
self.assertCloseEnough(some_constant, cvec_read_norm(fftgrain, 0, index))
for index in range(buf_size/2+1):
for channel in range(1, channels):
self.assertEqual(0., cvec_read_norm(fftgrain, channel, index))
# check phas
for index in range(buf_size/2+1):
for channel in range(channels):
self.assertEqual(0., cvec_read_phas(fftgrain, channel, index))
del fftgrain
del input
def test_do_constant(self):
""" test aubio_fft_do with a constant on one channel """
input = new_fvec(buf_size, channels)
fftgrain = new_cvec(buf_size, channels)
# write impulse in channel 0, sample 0.
some_constant = 0.003412432456
for index in range(1,buf_size):
fvec_write_sample(input, some_constant, 0, index)
aubio_fft_do(self.o, input, fftgrain)
# check norm and phase == 0 in all other channels
for index in range(buf_size/2+1):
for channel in range(1, channels):
self.assertEqual(0., cvec_read_norm(fftgrain, channel, index))
self.assertEqual(0., cvec_read_phas(fftgrain, channel, index))
    # check norm and phase == 0 in the first and last bin of the first channel
# check unwrap2pi(phas) ~= pi everywhere but in first and last bin
self.assertCloseEnough(0., cvec_read_phas(fftgrain, 0, 0))
for index in range(1,buf_size/2):
self.assertCloseEnough(math.pi, aubio_unwrap2pi(cvec_read_phas(fftgrain, 0, index)))
self.assertCloseEnough(0., cvec_read_phas(fftgrain, 0, buf_size/2))
self.assertCloseEnough(0., cvec_read_phas(fftgrain, 0, buf_size/2+1))
self.assertCloseEnough((buf_size-1)*some_constant, cvec_read_norm(fftgrain, 0, 0))
for index in range(1,buf_size/2+1):
self.assertCloseEnough(some_constant, cvec_read_norm(fftgrain, 0, index))
self.assertCloseEnough(0., cvec_read_norm(fftgrain, 0, buf_size/2+1))
del fftgrain
del input
def test_do_impulse_multichannel(self):
" test aubio_fft_do on impulse two channels "
input = new_fvec(buf_size, channels)
fftgrain = new_cvec(buf_size, channels)
# put an impulse in first an last channel, at first and last index
fvec_write_sample(input, 1., 0, 0)
fvec_write_sample(input, 1., channels-1, 0)
aubio_fft_do(self.o, input, fftgrain)
# check the norm
for index in range(buf_size/2+1):
self.assertEqual(1., cvec_read_norm(fftgrain, 0, index))
for index in range(buf_size/2+1):
for channel in range(1, channels-1):
self.assertEqual(0., cvec_read_norm(fftgrain, channel, index))
for index in range(buf_size/2+1):
self.assertEqual(1., cvec_read_norm(fftgrain, channels-1, index))
# check the phase
for index in range(buf_size/2+1):
for channel in range(channels):
self.assertEqual(0., cvec_read_phas(fftgrain, channel, index))
del fftgrain
del input
def test_rdo_impulse(self):
""" test aubio_fft_rdo on impulse """
fftgrain = new_cvec(buf_size, channels)
for channel in range(channels):
cvec_write_norm(fftgrain, 1., channel, 0)
output = new_fvec(buf_size, channels)
aubio_fft_rdo(self.o, fftgrain, output)
for index in range(buf_size/2+1):
for channel in range(channels):
self.assertCloseEnough(fvec_read_sample(output, channel, index), 1./buf_size)
del fftgrain
del output
def test_do_back_and_forth(self):
""" test aubio_fft_rdo on a constant """
input = new_fvec(buf_size, channels)
output = new_fvec(buf_size, channels)
fftgrain = new_cvec(buf_size, channels)
for index in range(buf_size/2+1):
for channel in range(channels):
fvec_write_sample(input, 0.67, channel, index)
aubio_fft_do(self.o, input, fftgrain)
aubio_fft_rdo(self.o, fftgrain, output)
for index in range(buf_size/2+1):
for channel in range(channels):
self.assertCloseEnough(0.67, fvec_read_sample(output, channel, index))
del fftgrain
del output
if __name__ == '__main__': unittest.main()
| gpl-3.0 |
h3llrais3r/SickRage | lib/tzlocal/darwin.py | 6 | 1042 | from __future__ import with_statement
import os
import pytz
import subprocess
_cache_tz = None
def _get_localzone():
pipe = subprocess.Popen(
"systemsetup -gettimezone",
shell=True,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
)
tzname = pipe.stdout.read().replace(b'Time Zone: ', b'').strip()
if not tzname or tzname not in pytz.all_timezones_set:
# link will be something like /usr/share/zoneinfo/America/Los_Angeles.
link = os.readlink("/etc/localtime")
tzname = link[link.rfind("zoneinfo/") + 9:]
pipe.stdout.close()
pipe.stderr.close()
return pytz.timezone(tzname)
def get_localzone():
"""Get the computers configured local timezone, if any."""
global _cache_tz
if _cache_tz is None:
_cache_tz = _get_localzone()
return _cache_tz
def reload_localzone():
"""Reload the cached localzone. You need to call this if the timezone has changed."""
global _cache_tz
_cache_tz = _get_localzone()
return _cache_tz
| gpl-3.0 |
sushi-irc/tekka | tekka/lib/fading_box.py | 1 | 6498 | """
Copyright (c) 2010 Marian Tietz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import gobject
import gtk
class FadingBox(gtk.EventBox):
""" Has the ability to fade it's background color to another """
""" NOTE: color attributes suffer from integer overflow (0xffff) """
MAX_FADING_TIME = 700 # miliseconds
# background color as defined in style rc
origin_bg_color = property(lambda s: s._origin_bg_color.copy(), None)
# bool property if fading is in progress
is_fading = property(lambda s: s._timeout_id != None, None)
def __init__(self):
super(FadingBox,self).__init__()
self._origin_bg_color = self.get_style().bg[gtk.STATE_NORMAL].copy()
self._timeout_id = None
self._timer = None
def reset_background(self):
""" reset the background color to what it should be according
to the gtkrc loaded
"""
style = self.get_style().copy()
style.bg[gtk.STATE_NORMAL] = self._origin_bg_color.copy()
self.set_style(style)
def _fade_timeout(self, color):
def do_timeout(self, color):
style = self.get_style().copy()
style.bg[gtk.STATE_NORMAL] = color.copy()
self.set_style(style)
self.stop_fading()
gobject.idle_add(do_timeout, self, color)
return False
def _fade_channel(self, channel, reference):
if channel == reference:
return (True, reference)
if channel > reference:
up = (channel / 10) + 1
if channel-up < 0 or channel-up < reference:
return (True, reference)
return (False, channel-up)
else:
up = (reference / 10) + 1
if channel+up >= 0xffff or channel+up > reference:
return (True, reference)
return (False, channel+up)
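	# Worked example (added): fading a channel of 0x8000 (32768) down towards
	# 0x4000 (16384): up = (32768 / 10) + 1 = 3277, so the channel drops to
	# 29491 on this tick -- roughly 10% of the current value per step, clamped
	# so it never overshoots the reference value or the 0..0xffff range.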
def _fade_bg(self, color):
""" modifiy each color channel of the background color according
to color and refresh the style with the new background color
"""
if not self._timeout_id:
return False
bg_color = self.bg_color
(rdone, bg_color.red) = self._fade_channel(bg_color.red,
color.red)
(gdone, bg_color.green) = self._fade_channel(bg_color.green,
color.green)
(bdone, bg_color.blue) = self._fade_channel(bg_color.blue,
color.blue)
self.bg_color = bg_color
style = self.get_style().copy()
style.bg[gtk.STATE_NORMAL] = bg_color.copy()
self.set_style(style)
if rdone and gdone and bdone:
self.stop_fading()
return False
return True
def fade(self, to_color, interval=40):
""" fade the background color of this box to the given color.
interval is the time in miliseconds between each fade
"""
if self._timeout_id:
return False # already fading
style = self.get_style()
self.bg_color = style.bg[gtk.STATE_NORMAL].copy()
self._timeout_id = gobject.timeout_add(interval,
self._fade_bg,
to_color)
self._timer = gobject.timeout_add(self.MAX_FADING_TIME,
self._fade_timeout,
to_color)
return True
def stop_fading(self):
if self._timeout_id:
gobject.source_remove(self._timeout_id)
gobject.source_remove(self._timer)
self._timeout_id = None
self._timer = None
self.emit("fade-finished")
gobject.signal_new("fade-finished", FadingBox, gobject.SIGNAL_ACTION,
None, ())
if __name__ == "__main__":
win = gtk.Window()
win.set_default_size(500,300)
vbox = gtk.VBox()
win.add(vbox)
box = FadingBox()
vbox.pack_start(box)
btn = gtk.Button("Green")
vbox.add(btn)
btn2 = gtk.Button("Background")
vbox.add(btn2)
btn3 = gtk.Button("Blue")
vbox.add(btn3)
btn4 = gtk.Button("Red")
vbox.add(btn4)
box.fade(gtk.gdk.Color("#0000ff"))
btn5 = gtk.Button("Black")
vbox.add(btn5)
def btn_clicked_cb(btn):
"""
def do_setup():
def new_fade(box):
if new_fade.ran:
return
print "new fade"
c = gtk.gdk.Color("#FF0000")
box.fade(c)
new_fade.ran = True
c = gtk.gdk.Color("#00FF00")
box.fade(c)
new_fade.ran = False
if not btn_clicked_cb.init:
box.connect("fade-finished", new_fade)
btn_clicked_cb.init = True
box.stop_fading()
box.reset_background()
gobject.timeout_add(1000, do_setup)
"""
"""
def finished_cb(box, callback):
self = finished_cb
if self.count == 0:
callback()
return
if self.back_fade:
gobject.idle_add(box.fade, self.two)
else:
gobject.idle_add(box.fade, self.one)
self.back_fade = not self.back_fade
self.count -= 1
def killsig():
box.disconnect(killsig.handle)
handle = box.connect("fade-finished", finished_cb, killsig)
finished_cb.back_fade = False
finished_cb.one = gtk.gdk.Color("#0f0")
finished_cb.two = box.origin_bg_color
finished_cb.count = 4
killsig.handle = handle
box.fade(gtk.gdk.Color("#0f0"))
"""
box.fade(gtk.gdk.Color("#0f0"))
def btn2_clicked_cb(btn):
box.fade(box.origin_bg_color)
def btn3_clicked_cb(btn):
box.fade(gtk.gdk.Color("#00f"))
def btn4_clicked_cb(btn):
box.fade(gtk.gdk.Color("#f00"))
def btn5_clicked_cb(btn):
box.fade(gtk.gdk.Color("#000"))
btn_clicked_cb.init = False
btn.connect("clicked", btn_clicked_cb)
btn2.connect("clicked", btn2_clicked_cb)
btn3.connect("clicked", btn3_clicked_cb)
btn4.connect("clicked", btn4_clicked_cb)
btn5.connect("clicked", btn5_clicked_cb)
win.connect("destroy", lambda w: gtk.main_quit())
win.show_all()
gtk.main()
| bsd-2-clause |
ctools/ctools | test/dev/cta_make_nodes.py | 1 | 3101 | #! /usr/bin/env python
# ==========================================================================
# This Python script creates the node section of the NodeFunction using
# logarithmically spaced energy bins. The intensity scale is set to the
# HESS Crab intensity (assuming a power law).
#
# Copyright (C) 2012-2016 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import sys
import math
# ============== #
# Write one node #
# ============== #
def write_node(f, energy, scale):
"""
Writes one node to XML file.
"""
# Convert to strings
str_energy = str(energy)
str_scale = str(scale)
# Write start tag
f.write(' <node>\n')
# Write energy
f.write(' <parameter scale="1e6" name="Energy"')
f.write(' min="'+str_energy+'" max="'+str_energy+'"')
f.write(' value="'+str_energy+'"')
f.write(' free="0"/>\n')
# Write intensity
f.write(' <parameter scale="'+str_scale+'" name="Intensity"')
f.write(' min="1e-5" max="1e5"')
f.write(' value="1.0"')
f.write(' free="1"/>\n')
# Write end tag
f.write(' </node>\n')
# Return
return
# ============ #
# Create nodes #
# ============ #
def create_nodes(emin, emax, enumbins):
"""
Create nodes (energies in TeV).
"""
# Open file
f = open("nodes.xml", "w")
# Set node boundaries
elogmin = math.log10(float(emin))
elogmax = math.log10(float(emax))
elogbin = (elogmax - elogmin)/(float(enumbins)-1.0)
# Fill arrays
for i in range(int(enumbins)):
# Compute energy
energy = math.pow(10.0, i*elogbin+elogmin)
# Compute scale (HESS Crab spectrum)
scale = 3.45e-17 * math.pow(energy, -2.63)
# Write node
write_node(f, energy, scale)
# Debug
#sys.stdout.write(energy)
        #sys.stdout.write(math.pow(10.0, int(math.log10(scale))-1.0)+"\n")
# Close file
f.close()
# Return
return
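# Worked example (added): create_nodes("0.1", "100", "4") uses
# elogbin = (log10(100) - log10(0.1)) / 3 = 1, giving node energies of
# 0.1, 1, 10 and 100 TeV; at 1 TeV the HESS Crab scale is simply
# 3.45e-17 * 1**-2.63 = 3.45e-17.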
# ======================= #
# Main script entry point #
# ======================= #
if __name__ == '__main__':
# Check command line
usage = "Usage: cta_make_nodes emin emax enumbins"
    if len(sys.argv) < 4:
sys.stdout.write(usage+"\n")
sys.exit()
# Extract parameters
emin = sys.argv[1]
emax = sys.argv[2]
enumbins = sys.argv[3]
# Create nodes
create_nodes(emin, emax, enumbins)
| gpl-3.0 |
Panagiotis-Kon/empower-runtime | empower/core/account.py | 1 | 3287 | #!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""EmPOWER Account Class."""
from empower.persistence import Session
from empower.persistence.persistence import TblAccount
ROLE_ADMIN = "admin"
ROLE_USER = "user"
class Account(object):
"""An user account on this controller."""
def __init__(self, username, password, name, surname, email, role):
self._username = username
self._password = password
self._role = role
self._name = name
self._surname = surname
self._email = email
def to_dict(self):
"""Return JSON-serializable representation of the object."""
return {'username': self.username,
'role': self.role,
'name': self.name,
'surname': self.surname,
'email': self.email}
@property
def username(self):
"""Get username."""
return self._username
@property
def password(self):
"""Get password"""
return self._password
@property
def role(self):
"""Get role."""
return self._role
@property
def name(self):
"""Get name."""
return self._name
@property
def surname(self):
"""Get surname."""
return self._surname
@property
def email(self):
"""Get email."""
return self._email
@name.setter
def name(self, name):
"""Set name."""
session = Session()
account = session.query(TblAccount) \
.filter(TblAccount.username == self.username) \
.first()
account.name = name
session.commit()
self._name = name
@surname.setter
def surname(self, surname):
"""Set surname."""
session = Session()
        account = session.query(TblAccount) \
.filter(TblAccount.username == self.username) \
.first()
account.surname = surname
session.commit()
self._surname = surname
@email.setter
def email(self, email):
"""Set email."""
session = Session()
account = Session().query(TblAccount) \
.filter(TblAccount.username == self.username) \
.first()
account.email = email
session.commit()
self._email = email
def __str__(self):
return str(self.username)
def __hash__(self):
return hash(self.username)
def __eq__(self, other):
if isinstance(other, Account):
return self.username == other.username
return False
def __ne__(self, other):
return not self.__eq__(other)
| apache-2.0 |
kmacinnis/sympy | sympy/polys/polyquinticconst.py | 117 | 96143 | """
Solving solvable quintics - An implementation of DS Dummit's paper
Paper :
http://www.ams.org/journals/mcom/1991-57-195/S0025-5718-1991-1079014-X/S0025-5718-1991-1079014-X.pdf
Mathematica notebook:
http://www.emba.uvm.edu/~ddummit/quintics/quintics.nb
"""
from __future__ import print_function, division
from sympy.core import S, Symbol
from sympy.core.numbers import I
from sympy.polys.polytools import Poly
from sympy.core.evalf import N
from sympy.functions import sqrt
from sympy.utilities import public
x = Symbol('x')
@public
class PolyQuintic(object):
"""Special functions for solvable quintics"""
def __init__(self, poly):
_, _, self.p, self.q, self.r, self.s = poly.all_coeffs()
self.zeta1 = S(-1)/4 + (sqrt(5)/4) + I*sqrt((sqrt(5)/8) + S(5)/8)
self.zeta2 = (-sqrt(5)/4) - S(1)/4 + I*sqrt((-sqrt(5)/8) + S(5)/8)
self.zeta3 = (-sqrt(5)/4) - S(1)/4 - I*sqrt((-sqrt(5)/8) + S(5)/8)
self.zeta4 = S(-1)/4 + (sqrt(5)/4) - I*sqrt((sqrt(5)/8) + S(5)/8)
@property
def f20(self):
p, q, r, s = self.p, self.q, self.r, self.s
f20 = q**8 - 13*p*q**6*r + p**5*q**2*r**2 + 65*p**2*q**4*r**2 - 4*p**6*r**3 - 128*p**3*q**2*r**3 + 17*q**4*r**3 + 48*p**4*r**4 - 16*p*q**2*r**4 - 192*p**2*r**5 + 256*r**6 - 4*p**5*q**3*s - 12*p**2*q**5*s + 18*p**6*q*r*s + 12*p**3*q**3*r*s - 124*q**5*r*s + 196*p**4*q*r**2*s + 590*p*q**3*r**2*s - 160*p**2*q*r**3*s - 1600*q*r**4*s - 27*p**7*s**2 - 150*p**4*q**2*s**2 - 125*p*q**4*s**2 - 99*p**5*r*s**2 - 725*p**2*q**2*r*s**2 + 1200*p**3*r**2*s**2 + 3250*q**2*r**2*s**2 - 2000*p*r**3*s**2 - 1250*p*q*r*s**3 + 3125*p**2*s**4 - 9375*r*s**4-(2*p*q**6 - 19*p**2*q**4*r + 51*p**3*q**2*r**2 - 3*q**4*r**2 - 32*p**4*r**3 - 76*p*q**2*r**3 + 256*p**2*r**4 - 512*r**5 + 31*p**3*q**3*s + 58*q**5*s - 117*p**4*q*r*s - 105*p*q**3*r*s - 260*p**2*q*r**2*s + 2400*q*r**3*s + 108*p**5*s**2 + 325*p**2*q**2*s**2 - 525*p**3*r*s**2 - 2750*q**2*r*s**2 + 500*p*r**2*s**2 - 625*p*q*s**3 + 3125*s**4)*x+(p**2*q**4 - 6*p**3*q**2*r - 8*q**4*r + 9*p**4*r**2 + 76*p*q**2*r**2 - 136*p**2*r**3 + 400*r**4 - 50*p*q**3*s + 90*p**2*q*r*s - 1400*q*r**2*s + 625*q**2*s**2 + 500*p*r*s**2)*x**2-(2*q**4 - 21*p*q**2*r + 40*p**2*r**2 - 160*r**3 + 15*p**2*q*s + 400*q*r*s - 125*p*s**2)*x**3+(2*p*q**2 - 6*p**2*r + 40*r**2 - 50*q*s)*x**4 + 8*r*x**5 + x**6
return Poly(f20, x)
@property
def b(self):
p, q, r, s = self.p, self.q, self.r, self.s
b = ( [], [0,0,0,0,0,0], [0,0,0,0,0,0], [0,0,0,0,0,0], [0,0,0,0,0,0],)
b[1][5] = 100*p**7*q**7 + 2175*p**4*q**9 + 10500*p*q**11 - 1100*p**8*q**5*r - 27975*p**5*q**7*r - 152950*p**2*q**9*r + 4125*p**9*q**3*r**2 + 128875*p**6*q**5*r**2 + 830525*p**3*q**7*r**2 - 59450*q**9*r**2 - 5400*p**10*q*r**3 - 243800*p**7*q**3*r**3 - 2082650*p**4*q**5*r**3 + 333925*p*q**7*r**3 + 139200*p**8*q*r**4 + 2406000*p**5*q**3*r**4 + 122600*p**2*q**5*r**4 - 1254400*p**6*q*r**5 - 3776000*p**3*q**3*r**5 - 1832000*q**5*r**5 + 4736000*p**4*q*r**6 + 6720000*p*q**3*r**6 - 6400000*p**2*q*r**7 + 900*p**9*q**4*s + 37400*p**6*q**6*s + 281625*p**3*q**8*s + 435000*q**10*s - 6750*p**10*q**2*r*s - 322300*p**7*q**4*r*s - 2718575*p**4*q**6*r*s - 4214250*p*q**8*r*s + 16200*p**11*r**2*s + 859275*p**8*q**2*r**2*s + 8925475*p**5*q**4*r**2*s + 14427875*p**2*q**6*r**2*s - 453600*p**9*r**3*s - 10038400*p**6*q**2*r**3*s - 17397500*p**3*q**4*r**3*s + 11333125*q**6*r**3*s + 4451200*p**7*r**4*s + 15850000*p**4*q**2*r**4*s - 34000000*p*q**4*r**4*s - 17984000*p**5*r**5*s + 10000000*p**2*q**2*r**5*s + 25600000*p**3*r**6*s + 8000000*q**2*r**6*s - 6075*p**11*q*s**2 + 83250*p**8*q**3*s**2 + 1282500*p**5*q**5*s**2 + 2862500*p**2*q**7*s**2 - 724275*p**9*q*r*s**2 - 9807250*p**6*q**3*r*s**2 - 28374375*p**3*q**5*r*s**2 - 22212500*q**7*r*s**2 + 8982000*p**7*q*r**2*s**2 + 39600000*p**4*q**3*r**2*s**2 + 61746875*p*q**5*r**2*s**2 + 1010000*p**5*q*r**3*s**2 + 1000000*p**2*q**3*r**3*s**2 - 78000000*p**3*q*r**4*s**2 - 30000000*q**3*r**4*s**2 - 80000000*p*q*r**5*s**2 + 759375*p**10*s**3 + 9787500*p**7*q**2*s**3 + 39062500*p**4*q**4*s**3 + 52343750*p*q**6*s**3 - 12301875*p**8*r*s**3 - 98175000*p**5*q**2*r*s**3 - 225078125*p**2*q**4*r*s**3 + 54900000*p**6*r**2*s**3 + 310000000*p**3*q**2*r**2*s**3 + 7890625*q**4*r**2*s**3 - 51250000*p**4*r**3*s**3 + 420000000*p*q**2*r**3*s**3 - 110000000*p**2*r**4*s**3 + 200000000*r**5*s**3 - 2109375*p**6*q*s**4 + 21093750*p**3*q**3*s**4 + 89843750*q**5*s**4 - 182343750*p**4*q*r*s**4 - 733203125*p*q**3*r*s**4 + 196875000*p**2*q*r**2*s**4 - 1125000000*q*r**3*s**4 + 158203125*p**5*s**5 + 566406250*p**2*q**2*s**5 - 101562500*p**3*r*s**5 + 1669921875*q**2*r*s**5 - 1250000000*p*r**2*s**5 + 1220703125*p*q*s**6 - 6103515625*s**7
b[1][4] = -1000*p**5*q**7 - 7250*p**2*q**9 + 10800*p**6*q**5*r + 96900*p**3*q**7*r + 52500*q**9*r - 37400*p**7*q**3*r**2 - 470850*p**4*q**5*r**2 - 640600*p*q**7*r**2 + 39600*p**8*q*r**3 + 983600*p**5*q**3*r**3 + 2848100*p**2*q**5*r**3 - 814400*p**6*q*r**4 - 6076000*p**3*q**3*r**4 - 2308000*q**5*r**4 + 5024000*p**4*q*r**5 + 9680000*p*q**3*r**5 - 9600000*p**2*q*r**6 - 13800*p**7*q**4*s - 94650*p**4*q**6*s + 26500*p*q**8*s + 86400*p**8*q**2*r*s + 816500*p**5*q**4*r*s + 257500*p**2*q**6*r*s - 91800*p**9*r**2*s - 1853700*p**6*q**2*r**2*s - 630000*p**3*q**4*r**2*s + 8971250*q**6*r**2*s + 2071200*p**7*r**3*s + 7240000*p**4*q**2*r**3*s - 29375000*p*q**4*r**3*s - 14416000*p**5*r**4*s + 5200000*p**2*q**2*r**4*s + 30400000*p**3*r**5*s + 12000000*q**2*r**5*s - 64800*p**9*q*s**2 - 567000*p**6*q**3*s**2 - 1655000*p**3*q**5*s**2 - 6987500*q**7*s**2 - 337500*p**7*q*r*s**2 - 8462500*p**4*q**3*r*s**2 + 5812500*p*q**5*r*s**2 + 24930000*p**5*q*r**2*s**2 + 69125000*p**2*q**3*r**2*s**2 - 103500000*p**3*q*r**3*s**2 - 30000000*q**3*r**3*s**2 - 90000000*p*q*r**4*s**2 + 708750*p**8*s**3 + 5400000*p**5*q**2*s**3 - 8906250*p**2*q**4*s**3 - 18562500*p**6*r*s**3 + 625000*p**3*q**2*r*s**3 - 29687500*q**4*r*s**3 + 75000000*p**4*r**2*s**3 + 416250000*p*q**2*r**2*s**3 - 60000000*p**2*r**3*s**3 + 300000000*r**4*s**3 - 71718750*p**4*q*s**4 - 189062500*p*q**3*s**4 - 210937500*p**2*q*r*s**4 - 1187500000*q*r**2*s**4 + 187500000*p**3*s**5 + 800781250*q**2*s**5 + 390625000*p*r*s**5
b[1][3] = 500*p**6*q**5 + 6350*p**3*q**7 + 19800*q**9 - 3750*p**7*q**3*r - 65100*p**4*q**5*r - 264950*p*q**7*r + 6750*p**8*q*r**2 + 209050*p**5*q**3*r**2 + 1217250*p**2*q**5*r**2 - 219000*p**6*q*r**3 - 2510000*p**3*q**3*r**3 - 1098500*q**5*r**3 + 2068000*p**4*q*r**4 + 5060000*p*q**3*r**4 - 5200000*p**2*q*r**5 + 6750*p**8*q**2*s + 96350*p**5*q**4*s + 346000*p**2*q**6*s - 20250*p**9*r*s - 459900*p**6*q**2*r*s - 1828750*p**3*q**4*r*s + 2930000*q**6*r*s + 594000*p**7*r**2*s + 4301250*p**4*q**2*r**2*s - 10906250*p*q**4*r**2*s - 5252000*p**5*r**3*s + 1450000*p**2*q**2*r**3*s + 12800000*p**3*r**4*s + 6500000*q**2*r**4*s - 74250*p**7*q*s**2 - 1418750*p**4*q**3*s**2 - 5956250*p*q**5*s**2 + 4297500*p**5*q*r*s**2 + 29906250*p**2*q**3*r*s**2 - 31500000*p**3*q*r**2*s**2 - 12500000*q**3*r**2*s**2 - 35000000*p*q*r**3*s**2 - 1350000*p**6*s**3 - 6093750*p**3*q**2*s**3 - 17500000*q**4*s**3 + 7031250*p**4*r*s**3 + 127812500*p*q**2*r*s**3 - 18750000*p**2*r**2*s**3 + 162500000*r**3*s**3 - 107812500*p**2*q*s**4 - 460937500*q*r*s**4 + 214843750*p*s**5
b[1][2] = -1950*p**4*q**5 - 14100*p*q**7 + 14350*p**5*q**3*r + 125600*p**2*q**5*r - 27900*p**6*q*r**2 - 402250*p**3*q**3*r**2 - 288250*q**5*r**2 + 436000*p**4*q*r**3 + 1345000*p*q**3*r**3 - 1400000*p**2*q*r**4 - 9450*p**6*q**2*s + 1250*p**3*q**4*s + 465000*q**6*s + 49950*p**7*r*s + 302500*p**4*q**2*r*s - 1718750*p*q**4*r*s - 834000*p**5*r**2*s - 437500*p**2*q**2*r**2*s + 3100000*p**3*r**3*s + 1750000*q**2*r**3*s + 292500*p**5*q*s**2 + 1937500*p**2*q**3*s**2 - 3343750*p**3*q*r*s**2 - 1875000*q**3*r*s**2 - 8125000*p*q*r**2*s**2 + 1406250*p**4*s**3 + 12343750*p*q**2*s**3 - 5312500*p**2*r*s**3 + 43750000*r**2*s**3 - 74218750*q*s**4
b[1][1] = 300*p**5*q**3 + 2150*p**2*q**5 - 1350*p**6*q*r - 21500*p**3*q**3*r - 61500*q**5*r + 42000*p**4*q*r**2 + 290000*p*q**3*r**2 - 300000*p**2*q*r**3 + 4050*p**7*s + 45000*p**4*q**2*s + 125000*p*q**4*s - 108000*p**5*r*s - 643750*p**2*q**2*r*s + 700000*p**3*r**2*s + 375000*q**2*r**2*s + 93750*p**3*q*s**2 + 312500*q**3*s**2 - 1875000*p*q*r*s**2 + 1406250*p**2*s**3 + 9375000*r*s**3
b[1][0] = -1250*p**3*q**3 - 9000*q**5 + 4500*p**4*q*r + 46250*p*q**3*r - 50000*p**2*q*r**2 - 6750*p**5*s - 43750*p**2*q**2*s + 75000*p**3*r*s + 62500*q**2*r*s - 156250*p*q*s**2 + 1562500*s**3
b[2][5] = 200*p**6*q**11 - 250*p**3*q**13 - 10800*q**15 - 3900*p**7*q**9*r - 3325*p**4*q**11*r + 181800*p*q**13*r + 26950*p**8*q**7*r**2 + 69625*p**5*q**9*r**2 - 1214450*p**2*q**11*r**2 - 78725*p**9*q**5*r**3 - 368675*p**6*q**7*r**3 + 4166325*p**3*q**9*r**3 + 1131100*q**11*r**3 + 73400*p**10*q**3*r**4 + 661950*p**7*q**5*r**4 - 9151950*p**4*q**7*r**4 - 16633075*p*q**9*r**4 + 36000*p**11*q*r**5 + 135600*p**8*q**3*r**5 + 17321400*p**5*q**5*r**5 + 85338300*p**2*q**7*r**5 - 832000*p**9*q*r**6 - 21379200*p**6*q**3*r**6 - 176044000*p**3*q**5*r**6 - 1410000*q**7*r**6 + 6528000*p**7*q*r**7 + 129664000*p**4*q**3*r**7 + 47344000*p*q**5*r**7 - 21504000*p**5*q*r**8 - 115200000*p**2*q**3*r**8 + 25600000*p**3*q*r**9 + 64000000*q**3*r**9 + 15700*p**8*q**8*s + 120525*p**5*q**10*s + 113250*p**2*q**12*s - 196900*p**9*q**6*r*s - 1776925*p**6*q**8*r*s - 3062475*p**3*q**10*r*s - 4153500*q**12*r*s + 857925*p**10*q**4*r**2*s + 10562775*p**7*q**6*r**2*s + 34866250*p**4*q**8*r**2*s + 73486750*p*q**10*r**2*s - 1333800*p**11*q**2*r**3*s - 29212625*p**8*q**4*r**3*s - 168729675*p**5*q**6*r**3*s - 427230750*p**2*q**8*r**3*s + 108000*p**12*r**4*s + 30384200*p**9*q**2*r**4*s + 324535100*p**6*q**4*r**4*s + 952666750*p**3*q**6*r**4*s - 38076875*q**8*r**4*s - 4296000*p**10*r**5*s - 213606400*p**7*q**2*r**5*s - 842060000*p**4*q**4*r**5*s - 95285000*p*q**6*r**5*s + 61184000*p**8*r**6*s + 567520000*p**5*q**2*r**6*s + 547000000*p**2*q**4*r**6*s - 390912000*p**6*r**7*s - 812800000*p**3*q**2*r**7*s - 924000000*q**4*r**7*s + 1152000000*p**4*r**8*s + 800000000*p*q**2*r**8*s - 1280000000*p**2*r**9*s + 141750*p**10*q**5*s**2 - 31500*p**7*q**7*s**2 - 11325000*p**4*q**9*s**2 - 31687500*p*q**11*s**2 - 1293975*p**11*q**3*r*s**2 - 4803800*p**8*q**5*r*s**2 + 71398250*p**5*q**7*r*s**2 + 227625000*p**2*q**9*r*s**2 + 3256200*p**12*q*r**2*s**2 + 43870125*p**9*q**3*r**2*s**2 + 64581500*p**6*q**5*r**2*s**2 + 56090625*p**3*q**7*r**2*s**2 + 260218750*q**9*r**2*s**2 - 74610000*p**10*q*r**3*s**2 - 662186500*p**7*q**3*r**3*s**2 - 1987747500*p**4*q**5*r**3*s**2 - 811928125*p*q**7*r**3*s**2 + 471286000*p**8*q*r**4*s**2 + 2106040000*p**5*q**3*r**4*s**2 + 792687500*p**2*q**5*r**4*s**2 - 135120000*p**6*q*r**5*s**2 + 2479000000*p**3*q**3*r**5*s**2 + 5242250000*q**5*r**5*s**2 - 6400000000*p**4*q*r**6*s**2 - 8620000000*p*q**3*r**6*s**2 + 13280000000*p**2*q*r**7*s**2 + 1600000000*q*r**8*s**2 + 273375*p**12*q**2*s**3 - 13612500*p**9*q**4*s**3 - 177250000*p**6*q**6*s**3 - 511015625*p**3*q**8*s**3 - 320937500*q**10*s**3 - 2770200*p**13*r*s**3 + 12595500*p**10*q**2*r*s**3 + 543950000*p**7*q**4*r*s**3 + 1612281250*p**4*q**6*r*s**3 + 968125000*p*q**8*r*s**3 + 77031000*p**11*r**2*s**3 + 373218750*p**8*q**2*r**2*s**3 + 1839765625*p**5*q**4*r**2*s**3 + 1818515625*p**2*q**6*r**2*s**3 - 776745000*p**9*r**3*s**3 - 6861075000*p**6*q**2*r**3*s**3 - 20014531250*p**3*q**4*r**3*s**3 - 13747812500*q**6*r**3*s**3 + 3768000000*p**7*r**4*s**3 + 35365000000*p**4*q**2*r**4*s**3 + 34441875000*p*q**4*r**4*s**3 - 9628000000*p**5*r**5*s**3 - 63230000000*p**2*q**2*r**5*s**3 + 13600000000*p**3*r**6*s**3 - 15000000000*q**2*r**6*s**3 - 10400000000*p*r**7*s**3 - 45562500*p**11*q*s**4 - 525937500*p**8*q**3*s**4 - 1364218750*p**5*q**5*s**4 - 1382812500*p**2*q**7*s**4 + 572062500*p**9*q*r*s**4 + 2473515625*p**6*q**3*r*s**4 + 13192187500*p**3*q**5*r*s**4 + 12703125000*q**7*r*s**4 - 451406250*p**7*q*r**2*s**4 - 18153906250*p**4*q**3*r**2*s**4 - 36908203125*p*q**5*r**2*s**4 - 9069375000*p**5*q*r**3*s**4 + 79957812500*p**2*q**3*r**3*s**4 + 5512500000*p**3*q*r**4*s**4 + 50656250000*q**3*r**4*s**4 + 
74750000000*p*q*r**5*s**4 + 56953125*p**10*s**5 + 1381640625*p**7*q**2*s**5 - 781250000*p**4*q**4*s**5 + 878906250*p*q**6*s**5 - 2655703125*p**8*r*s**5 - 3223046875*p**5*q**2*r*s**5 - 35117187500*p**2*q**4*r*s**5 + 26573437500*p**6*r**2*s**5 + 14785156250*p**3*q**2*r**2*s**5 - 52050781250*q**4*r**2*s**5 - 103062500000*p**4*r**3*s**5 - 281796875000*p*q**2*r**3*s**5 + 146875000000*p**2*r**4*s**5 - 37500000000*r**5*s**5 - 8789062500*p**6*q*s**6 - 3906250000*p**3*q**3*s**6 + 1464843750*q**5*s**6 + 102929687500*p**4*q*r*s**6 + 297119140625*p*q**3*r*s**6 - 217773437500*p**2*q*r**2*s**6 + 167968750000*q*r**3*s**6 + 10986328125*p**5*s**7 + 98876953125*p**2*q**2*s**7 - 188964843750*p**3*r*s**7 - 278320312500*q**2*r*s**7 + 517578125000*p*r**2*s**7 - 610351562500*p*q*s**8 + 762939453125*s**9
b[2][4] = -200*p**7*q**9 + 1850*p**4*q**11 + 21600*p*q**13 + 3200*p**8*q**7*r - 19200*p**5*q**9*r - 316350*p**2*q**11*r - 19050*p**9*q**5*r**2 + 37400*p**6*q**7*r**2 + 1759250*p**3*q**9*r**2 + 440100*q**11*r**2 + 48750*p**10*q**3*r**3 + 190200*p**7*q**5*r**3 - 4604200*p**4*q**7*r**3 - 6072800*p*q**9*r**3 - 43200*p**11*q*r**4 - 834500*p**8*q**3*r**4 + 4916000*p**5*q**5*r**4 + 27926850*p**2*q**7*r**4 + 969600*p**9*q*r**5 + 2467200*p**6*q**3*r**5 - 45393200*p**3*q**5*r**5 - 5399500*q**7*r**5 - 7283200*p**7*q*r**6 + 10536000*p**4*q**3*r**6 + 41656000*p*q**5*r**6 + 22784000*p**5*q*r**7 - 35200000*p**2*q**3*r**7 - 25600000*p**3*q*r**8 + 96000000*q**3*r**8 - 3000*p**9*q**6*s + 40400*p**6*q**8*s + 136550*p**3*q**10*s - 1647000*q**12*s + 40500*p**10*q**4*r*s - 173600*p**7*q**6*r*s - 126500*p**4*q**8*r*s + 23969250*p*q**10*r*s - 153900*p**11*q**2*r**2*s - 486150*p**8*q**4*r**2*s - 4115800*p**5*q**6*r**2*s - 112653250*p**2*q**8*r**2*s + 129600*p**12*r**3*s + 2683350*p**9*q**2*r**3*s + 10906650*p**6*q**4*r**3*s + 187289500*p**3*q**6*r**3*s + 44098750*q**8*r**3*s - 4384800*p**10*r**4*s - 35660800*p**7*q**2*r**4*s - 175420000*p**4*q**4*r**4*s - 426538750*p*q**6*r**4*s + 60857600*p**8*r**5*s + 349436000*p**5*q**2*r**5*s + 900600000*p**2*q**4*r**5*s - 429568000*p**6*r**6*s - 1511200000*p**3*q**2*r**6*s - 1286000000*q**4*r**6*s + 1472000000*p**4*r**7*s + 1440000000*p*q**2*r**7*s - 1920000000*p**2*r**8*s - 36450*p**11*q**3*s**2 - 188100*p**8*q**5*s**2 - 5504750*p**5*q**7*s**2 - 37968750*p**2*q**9*s**2 + 255150*p**12*q*r*s**2 + 2754000*p**9*q**3*r*s**2 + 49196500*p**6*q**5*r*s**2 + 323587500*p**3*q**7*r*s**2 - 83250000*q**9*r*s**2 - 465750*p**10*q*r**2*s**2 - 31881500*p**7*q**3*r**2*s**2 - 415585000*p**4*q**5*r**2*s**2 + 1054775000*p*q**7*r**2*s**2 - 96823500*p**8*q*r**3*s**2 - 701490000*p**5*q**3*r**3*s**2 - 2953531250*p**2*q**5*r**3*s**2 + 1454560000*p**6*q*r**4*s**2 + 7670500000*p**3*q**3*r**4*s**2 + 5661062500*q**5*r**4*s**2 - 7785000000*p**4*q*r**5*s**2 - 9450000000*p*q**3*r**5*s**2 + 14000000000*p**2*q*r**6*s**2 + 2400000000*q*r**7*s**2 - 437400*p**13*s**3 - 10145250*p**10*q**2*s**3 - 121912500*p**7*q**4*s**3 - 576531250*p**4*q**6*s**3 - 528593750*p*q**8*s**3 + 12939750*p**11*r*s**3 + 313368750*p**8*q**2*r*s**3 + 2171812500*p**5*q**4*r*s**3 + 2381718750*p**2*q**6*r*s**3 - 124638750*p**9*r**2*s**3 - 3001575000*p**6*q**2*r**2*s**3 - 12259375000*p**3*q**4*r**2*s**3 - 9985312500*q**6*r**2*s**3 + 384000000*p**7*r**3*s**3 + 13997500000*p**4*q**2*r**3*s**3 + 20749531250*p*q**4*r**3*s**3 - 553500000*p**5*r**4*s**3 - 41835000000*p**2*q**2*r**4*s**3 + 5420000000*p**3*r**5*s**3 - 16300000000*q**2*r**5*s**3 - 17600000000*p*r**6*s**3 - 7593750*p**9*q*s**4 + 289218750*p**6*q**3*s**4 + 3591406250*p**3*q**5*s**4 + 5992187500*q**7*s**4 + 658125000*p**7*q*r*s**4 - 269531250*p**4*q**3*r*s**4 - 15882812500*p*q**5*r*s**4 - 4785000000*p**5*q*r**2*s**4 + 54375781250*p**2*q**3*r**2*s**4 - 5668750000*p**3*q*r**3*s**4 + 35867187500*q**3*r**3*s**4 + 113875000000*p*q*r**4*s**4 - 544218750*p**8*s**5 - 5407031250*p**5*q**2*s**5 - 14277343750*p**2*q**4*s**5 + 5421093750*p**6*r*s**5 - 24941406250*p**3*q**2*r*s**5 - 25488281250*q**4*r*s**5 - 11500000000*p**4*r**2*s**5 - 231894531250*p*q**2*r**2*s**5 - 6250000000*p**2*r**3*s**5 - 43750000000*r**4*s**5 + 35449218750*p**4*q*s**6 + 137695312500*p*q**3*s**6 + 34667968750*p**2*q*r*s**6 + 202148437500*q*r**2*s**6 - 33691406250*p**3*s**7 - 214843750000*q**2*s**7 - 31738281250*p*r*s**7
b[2][3] = -800*p**5*q**9 - 5400*p**2*q**11 + 5800*p**6*q**7*r + 48750*p**3*q**9*r + 16200*q**11*r - 3000*p**7*q**5*r**2 - 108350*p**4*q**7*r**2 - 263250*p*q**9*r**2 - 60700*p**8*q**3*r**3 - 386250*p**5*q**5*r**3 + 253100*p**2*q**7*r**3 + 127800*p**9*q*r**4 + 2326700*p**6*q**3*r**4 + 6565550*p**3*q**5*r**4 - 705750*q**7*r**4 - 2903200*p**7*q*r**5 - 21218000*p**4*q**3*r**5 + 1057000*p*q**5*r**5 + 20368000*p**5*q*r**6 + 33000000*p**2*q**3*r**6 - 43200000*p**3*q*r**7 + 52000000*q**3*r**7 + 6200*p**7*q**6*s + 188250*p**4*q**8*s + 931500*p*q**10*s - 73800*p**8*q**4*r*s - 1466850*p**5*q**6*r*s - 6894000*p**2*q**8*r*s + 315900*p**9*q**2*r**2*s + 4547000*p**6*q**4*r**2*s + 20362500*p**3*q**6*r**2*s + 15018750*q**8*r**2*s - 653400*p**10*r**3*s - 13897550*p**7*q**2*r**3*s - 76757500*p**4*q**4*r**3*s - 124207500*p*q**6*r**3*s + 18567600*p**8*r**4*s + 175911000*p**5*q**2*r**4*s + 253787500*p**2*q**4*r**4*s - 183816000*p**6*r**5*s - 706900000*p**3*q**2*r**5*s - 665750000*q**4*r**5*s + 740000000*p**4*r**6*s + 890000000*p*q**2*r**6*s - 1040000000*p**2*r**7*s - 763000*p**6*q**5*s**2 - 12375000*p**3*q**7*s**2 - 40500000*q**9*s**2 + 364500*p**10*q*r*s**2 + 15537000*p**7*q**3*r*s**2 + 154392500*p**4*q**5*r*s**2 + 372206250*p*q**7*r*s**2 - 25481250*p**8*q*r**2*s**2 - 386300000*p**5*q**3*r**2*s**2 - 996343750*p**2*q**5*r**2*s**2 + 459872500*p**6*q*r**3*s**2 + 2943937500*p**3*q**3*r**3*s**2 + 2437781250*q**5*r**3*s**2 - 2883750000*p**4*q*r**4*s**2 - 4343750000*p*q**3*r**4*s**2 + 5495000000*p**2*q*r**5*s**2 + 1300000000*q*r**6*s**2 - 364500*p**11*s**3 - 13668750*p**8*q**2*s**3 - 113406250*p**5*q**4*s**3 - 159062500*p**2*q**6*s**3 + 13972500*p**9*r*s**3 + 61537500*p**6*q**2*r*s**3 - 1622656250*p**3*q**4*r*s**3 - 2720625000*q**6*r*s**3 - 201656250*p**7*r**2*s**3 + 1949687500*p**4*q**2*r**2*s**3 + 4979687500*p*q**4*r**2*s**3 + 497125000*p**5*r**3*s**3 - 11150625000*p**2*q**2*r**3*s**3 + 2982500000*p**3*r**4*s**3 - 6612500000*q**2*r**4*s**3 - 10450000000*p*r**5*s**3 + 126562500*p**7*q*s**4 + 1443750000*p**4*q**3*s**4 + 281250000*p*q**5*s**4 - 1648125000*p**5*q*r*s**4 + 11271093750*p**2*q**3*r*s**4 - 4785156250*p**3*q*r**2*s**4 + 8808593750*q**3*r**2*s**4 + 52390625000*p*q*r**3*s**4 - 611718750*p**6*s**5 - 13027343750*p**3*q**2*s**5 - 1464843750*q**4*s**5 + 6492187500*p**4*r*s**5 - 65351562500*p*q**2*r*s**5 - 13476562500*p**2*r**2*s**5 - 24218750000*r**3*s**5 + 41992187500*p**2*q*s**6 + 69824218750*q*r*s**6 - 34179687500*p*s**7
b[2][2] = -1000*p**6*q**7 - 5150*p**3*q**9 + 10800*q**11 + 11000*p**7*q**5*r + 66450*p**4*q**7*r - 127800*p*q**9*r - 41250*p**8*q**3*r**2 - 368400*p**5*q**5*r**2 + 204200*p**2*q**7*r**2 + 54000*p**9*q*r**3 + 1040950*p**6*q**3*r**3 + 2096500*p**3*q**5*r**3 + 200000*q**7*r**3 - 1140000*p**7*q*r**4 - 7691000*p**4*q**3*r**4 - 2281000*p*q**5*r**4 + 7296000*p**5*q*r**5 + 13300000*p**2*q**3*r**5 - 14400000*p**3*q*r**6 + 14000000*q**3*r**6 - 9000*p**8*q**4*s + 52100*p**5*q**6*s + 710250*p**2*q**8*s + 67500*p**9*q**2*r*s - 256100*p**6*q**4*r*s - 5753000*p**3*q**6*r*s + 292500*q**8*r*s - 162000*p**10*r**2*s - 1432350*p**7*q**2*r**2*s + 5410000*p**4*q**4*r**2*s - 7408750*p*q**6*r**2*s + 4401000*p**8*r**3*s + 24185000*p**5*q**2*r**3*s + 20781250*p**2*q**4*r**3*s - 43012000*p**6*r**4*s - 146300000*p**3*q**2*r**4*s - 165875000*q**4*r**4*s + 182000000*p**4*r**5*s + 250000000*p*q**2*r**5*s - 280000000*p**2*r**6*s + 60750*p**10*q*s**2 + 2414250*p**7*q**3*s**2 + 15770000*p**4*q**5*s**2 + 15825000*p*q**7*s**2 - 6021000*p**8*q*r*s**2 - 62252500*p**5*q**3*r*s**2 - 74718750*p**2*q**5*r*s**2 + 90888750*p**6*q*r**2*s**2 + 471312500*p**3*q**3*r**2*s**2 + 525875000*q**5*r**2*s**2 - 539375000*p**4*q*r**3*s**2 - 1030000000*p*q**3*r**3*s**2 + 1142500000*p**2*q*r**4*s**2 + 350000000*q*r**5*s**2 - 303750*p**9*s**3 - 35943750*p**6*q**2*s**3 - 331875000*p**3*q**4*s**3 - 505937500*q**6*s**3 + 8437500*p**7*r*s**3 + 530781250*p**4*q**2*r*s**3 + 1150312500*p*q**4*r*s**3 - 154500000*p**5*r**2*s**3 - 2059062500*p**2*q**2*r**2*s**3 + 1150000000*p**3*r**3*s**3 - 1343750000*q**2*r**3*s**3 - 2900000000*p*r**4*s**3 + 30937500*p**5*q*s**4 + 1166406250*p**2*q**3*s**4 - 1496875000*p**3*q*r*s**4 + 1296875000*q**3*r*s**4 + 10640625000*p*q*r**2*s**4 - 281250000*p**4*s**5 - 9746093750*p*q**2*s**5 + 1269531250*p**2*r*s**5 - 7421875000*r**2*s**5 + 15625000000*q*s**6
b[2][1] = -1600*p**4*q**7 - 10800*p*q**9 + 9800*p**5*q**5*r + 80550*p**2*q**7*r - 4600*p**6*q**3*r**2 - 112700*p**3*q**5*r**2 + 40500*q**7*r**2 - 34200*p**7*q*r**3 - 279500*p**4*q**3*r**3 - 665750*p*q**5*r**3 + 632000*p**5*q*r**4 + 3200000*p**2*q**3*r**4 - 2800000*p**3*q*r**5 + 3000000*q**3*r**5 - 18600*p**6*q**4*s - 51750*p**3*q**6*s + 405000*q**8*s + 21600*p**7*q**2*r*s - 122500*p**4*q**4*r*s - 2891250*p*q**6*r*s + 156600*p**8*r**2*s + 1569750*p**5*q**2*r**2*s + 6943750*p**2*q**4*r**2*s - 3774000*p**6*r**3*s - 27100000*p**3*q**2*r**3*s - 30187500*q**4*r**3*s + 28000000*p**4*r**4*s + 52500000*p*q**2*r**4*s - 60000000*p**2*r**5*s - 81000*p**8*q*s**2 - 240000*p**5*q**3*s**2 + 937500*p**2*q**5*s**2 + 3273750*p**6*q*r*s**2 + 30406250*p**3*q**3*r*s**2 + 55687500*q**5*r*s**2 - 42187500*p**4*q*r**2*s**2 - 112812500*p*q**3*r**2*s**2 + 152500000*p**2*q*r**3*s**2 + 75000000*q*r**4*s**2 - 4218750*p**4*q**2*s**3 + 15156250*p*q**4*s**3 + 5906250*p**5*r*s**3 - 206562500*p**2*q**2*r*s**3 + 107500000*p**3*r**2*s**3 - 159375000*q**2*r**2*s**3 - 612500000*p*r**3*s**3 + 135937500*p**3*q*s**4 + 46875000*q**3*s**4 + 1175781250*p*q*r*s**4 - 292968750*p**2*s**5 - 1367187500*r*s**5
b[2][0] = -800*p**5*q**5 - 5400*p**2*q**7 + 6000*p**6*q**3*r + 51700*p**3*q**5*r + 27000*q**7*r - 10800*p**7*q*r**2 - 163250*p**4*q**3*r**2 - 285750*p*q**5*r**2 + 192000*p**5*q*r**3 + 1000000*p**2*q**3*r**3 - 800000*p**3*q*r**4 + 500000*q**3*r**4 - 10800*p**7*q**2*s - 57500*p**4*q**4*s + 67500*p*q**6*s + 32400*p**8*r*s + 279000*p**5*q**2*r*s - 131250*p**2*q**4*r*s - 729000*p**6*r**2*s - 4100000*p**3*q**2*r**2*s - 5343750*q**4*r**2*s + 5000000*p**4*r**3*s + 10000000*p*q**2*r**3*s - 10000000*p**2*r**4*s + 641250*p**6*q*s**2 + 5812500*p**3*q**3*s**2 + 10125000*q**5*s**2 - 7031250*p**4*q*r*s**2 - 20625000*p*q**3*r*s**2 + 17500000*p**2*q*r**2*s**2 + 12500000*q*r**3*s**2 - 843750*p**5*s**3 - 19375000*p**2*q**2*s**3 + 30000000*p**3*r*s**3 - 20312500*q**2*r*s**3 - 112500000*p*r**2*s**3 + 183593750*p*q*s**4 - 292968750*s**5
b[3][5] = 500*p**11*q**6 + 9875*p**8*q**8 + 42625*p**5*q**10 - 35000*p**2*q**12 - 4500*p**12*q**4*r - 108375*p**9*q**6*r - 516750*p**6*q**8*r + 1110500*p**3*q**10*r + 2730000*q**12*r + 10125*p**13*q**2*r**2 + 358250*p**10*q**4*r**2 + 1908625*p**7*q**6*r**2 - 11744250*p**4*q**8*r**2 - 43383250*p*q**10*r**2 - 313875*p**11*q**2*r**3 - 2074875*p**8*q**4*r**3 + 52094750*p**5*q**6*r**3 + 264567500*p**2*q**8*r**3 + 796125*p**9*q**2*r**4 - 92486250*p**6*q**4*r**4 - 757957500*p**3*q**6*r**4 - 29354375*q**8*r**4 + 60970000*p**7*q**2*r**5 + 1112462500*p**4*q**4*r**5 + 571094375*p*q**6*r**5 - 685290000*p**5*q**2*r**6 - 2037800000*p**2*q**4*r**6 + 2279600000*p**3*q**2*r**7 + 849000000*q**4*r**7 - 1480000000*p*q**2*r**8 + 13500*p**13*q**3*s + 363000*p**10*q**5*s + 2861250*p**7*q**7*s + 8493750*p**4*q**9*s + 17031250*p*q**11*s - 60750*p**14*q*r*s - 2319750*p**11*q**3*r*s - 22674250*p**8*q**5*r*s - 74368750*p**5*q**7*r*s - 170578125*p**2*q**9*r*s + 2760750*p**12*q*r**2*s + 46719000*p**9*q**3*r**2*s + 163356375*p**6*q**5*r**2*s + 360295625*p**3*q**7*r**2*s - 195990625*q**9*r**2*s - 37341750*p**10*q*r**3*s - 194739375*p**7*q**3*r**3*s - 105463125*p**4*q**5*r**3*s - 415825000*p*q**7*r**3*s + 90180000*p**8*q*r**4*s - 990552500*p**5*q**3*r**4*s + 3519212500*p**2*q**5*r**4*s + 1112220000*p**6*q*r**5*s - 4508750000*p**3*q**3*r**5*s - 8159500000*q**5*r**5*s - 4356000000*p**4*q*r**6*s + 14615000000*p*q**3*r**6*s - 2160000000*p**2*q*r**7*s + 91125*p**15*s**2 + 3290625*p**12*q**2*s**2 + 35100000*p**9*q**4*s**2 + 175406250*p**6*q**6*s**2 + 629062500*p**3*q**8*s**2 + 910937500*q**10*s**2 - 5710500*p**13*r*s**2 - 100423125*p**10*q**2*r*s**2 - 604743750*p**7*q**4*r*s**2 - 2954843750*p**4*q**6*r*s**2 - 4587578125*p*q**8*r*s**2 + 116194500*p**11*r**2*s**2 + 1280716250*p**8*q**2*r**2*s**2 + 7401190625*p**5*q**4*r**2*s**2 + 11619937500*p**2*q**6*r**2*s**2 - 952173125*p**9*r**3*s**2 - 6519712500*p**6*q**2*r**3*s**2 - 10238593750*p**3*q**4*r**3*s**2 + 29984609375*q**6*r**3*s**2 + 2558300000*p**7*r**4*s**2 + 16225000000*p**4*q**2*r**4*s**2 - 64994140625*p*q**4*r**4*s**2 + 4202250000*p**5*r**5*s**2 + 46925000000*p**2*q**2*r**5*s**2 - 28950000000*p**3*r**6*s**2 - 1000000000*q**2*r**6*s**2 + 37000000000*p*r**7*s**2 - 48093750*p**11*q*s**3 - 673359375*p**8*q**3*s**3 - 2170312500*p**5*q**5*s**3 - 2466796875*p**2*q**7*s**3 + 647578125*p**9*q*r*s**3 + 597031250*p**6*q**3*r*s**3 - 7542578125*p**3*q**5*r*s**3 - 41125000000*q**7*r*s**3 - 2175828125*p**7*q*r**2*s**3 - 7101562500*p**4*q**3*r**2*s**3 + 100596875000*p*q**5*r**2*s**3 - 8984687500*p**5*q*r**3*s**3 - 120070312500*p**2*q**3*r**3*s**3 + 57343750000*p**3*q*r**4*s**3 + 9500000000*q**3*r**4*s**3 - 342875000000*p*q*r**5*s**3 + 400781250*p**10*s**4 + 8531250000*p**7*q**2*s**4 + 34033203125*p**4*q**4*s**4 + 42724609375*p*q**6*s**4 - 6289453125*p**8*r*s**4 - 24037109375*p**5*q**2*r*s**4 - 62626953125*p**2*q**4*r*s**4 + 17299218750*p**6*r**2*s**4 + 108357421875*p**3*q**2*r**2*s**4 - 55380859375*q**4*r**2*s**4 + 105648437500*p**4*r**3*s**4 + 1204228515625*p*q**2*r**3*s**4 - 365000000000*p**2*r**4*s**4 + 184375000000*r**5*s**4 - 32080078125*p**6*q*s**5 - 98144531250*p**3*q**3*s**5 + 93994140625*q**5*s**5 - 178955078125*p**4*q*r*s**5 - 1299804687500*p*q**3*r*s**5 + 332421875000*p**2*q*r**2*s**5 - 1195312500000*q*r**3*s**5 + 72021484375*p**5*s**6 + 323486328125*p**2*q**2*s**6 + 682373046875*p**3*r*s**6 + 2447509765625*q**2*r*s**6 - 3011474609375*p*r**2*s**6 + 3051757812500*p*q*s**7 - 7629394531250*s**8
b[3][4] = 1500*p**9*q**6 + 69625*p**6*q**8 + 590375*p**3*q**10 + 1035000*q**12 - 13500*p**10*q**4*r - 760625*p**7*q**6*r - 7904500*p**4*q**8*r - 18169250*p*q**10*r + 30375*p**11*q**2*r**2 + 2628625*p**8*q**4*r**2 + 37879000*p**5*q**6*r**2 + 121367500*p**2*q**8*r**2 - 2699250*p**9*q**2*r**3 - 76776875*p**6*q**4*r**3 - 403583125*p**3*q**6*r**3 - 78865625*q**8*r**3 + 60907500*p**7*q**2*r**4 + 735291250*p**4*q**4*r**4 + 781142500*p*q**6*r**4 - 558270000*p**5*q**2*r**5 - 2150725000*p**2*q**4*r**5 + 2015400000*p**3*q**2*r**6 + 1181000000*q**4*r**6 - 2220000000*p*q**2*r**7 + 40500*p**11*q**3*s + 1376500*p**8*q**5*s + 9953125*p**5*q**7*s + 9765625*p**2*q**9*s - 182250*p**12*q*r*s - 8859000*p**9*q**3*r*s - 82854500*p**6*q**5*r*s - 71511250*p**3*q**7*r*s + 273631250*q**9*r*s + 10233000*p**10*q*r**2*s + 179627500*p**7*q**3*r**2*s + 25164375*p**4*q**5*r**2*s - 2927290625*p*q**7*r**2*s - 171305000*p**8*q*r**3*s - 544768750*p**5*q**3*r**3*s + 7583437500*p**2*q**5*r**3*s + 1139860000*p**6*q*r**4*s - 6489375000*p**3*q**3*r**4*s - 9625375000*q**5*r**4*s - 1838000000*p**4*q*r**5*s + 19835000000*p*q**3*r**5*s - 3240000000*p**2*q*r**6*s + 273375*p**13*s**2 + 9753750*p**10*q**2*s**2 + 82575000*p**7*q**4*s**2 + 202265625*p**4*q**6*s**2 + 556093750*p*q**8*s**2 - 11552625*p**11*r*s**2 - 115813125*p**8*q**2*r*s**2 + 630590625*p**5*q**4*r*s**2 + 1347015625*p**2*q**6*r*s**2 + 157578750*p**9*r**2*s**2 - 689206250*p**6*q**2*r**2*s**2 - 4299609375*p**3*q**4*r**2*s**2 + 23896171875*q**6*r**2*s**2 - 1022437500*p**7*r**3*s**2 + 6648125000*p**4*q**2*r**3*s**2 - 52895312500*p*q**4*r**3*s**2 + 4401750000*p**5*r**4*s**2 + 26500000000*p**2*q**2*r**4*s**2 - 22125000000*p**3*r**5*s**2 - 1500000000*q**2*r**5*s**2 + 55500000000*p*r**6*s**2 - 137109375*p**9*q*s**3 - 1955937500*p**6*q**3*s**3 - 6790234375*p**3*q**5*s**3 - 16996093750*q**7*s**3 + 2146218750*p**7*q*r*s**3 + 6570312500*p**4*q**3*r*s**3 + 39918750000*p*q**5*r*s**3 - 7673281250*p**5*q*r**2*s**3 - 52000000000*p**2*q**3*r**2*s**3 + 50796875000*p**3*q*r**3*s**3 + 18750000000*q**3*r**3*s**3 - 399875000000*p*q*r**4*s**3 + 780468750*p**8*s**4 + 14455078125*p**5*q**2*s**4 + 10048828125*p**2*q**4*s**4 - 15113671875*p**6*r*s**4 + 39298828125*p**3*q**2*r*s**4 - 52138671875*q**4*r*s**4 + 45964843750*p**4*r**2*s**4 + 914414062500*p*q**2*r**2*s**4 + 1953125000*p**2*r**3*s**4 + 334375000000*r**4*s**4 - 149169921875*p**4*q*s**5 - 459716796875*p*q**3*s**5 - 325585937500*p**2*q*r*s**5 - 1462890625000*q*r**2*s**5 + 296630859375*p**3*s**6 + 1324462890625*q**2*s**6 + 307617187500*p*r*s**6
b[3][3] = -20750*p**7*q**6 - 290125*p**4*q**8 - 993000*p*q**10 + 146125*p**8*q**4*r + 2721500*p**5*q**6*r + 11833750*p**2*q**8*r - 237375*p**9*q**2*r**2 - 8167500*p**6*q**4*r**2 - 54605625*p**3*q**6*r**2 - 23802500*q**8*r**2 + 8927500*p**7*q**2*r**3 + 131184375*p**4*q**4*r**3 + 254695000*p*q**6*r**3 - 121561250*p**5*q**2*r**4 - 728003125*p**2*q**4*r**4 + 702550000*p**3*q**2*r**5 + 597312500*q**4*r**5 - 1202500000*p*q**2*r**6 - 194625*p**9*q**3*s - 1568875*p**6*q**5*s + 9685625*p**3*q**7*s + 74662500*q**9*s + 327375*p**10*q*r*s + 1280000*p**7*q**3*r*s - 123703750*p**4*q**5*r*s - 850121875*p*q**7*r*s - 7436250*p**8*q*r**2*s + 164820000*p**5*q**3*r**2*s + 2336659375*p**2*q**5*r**2*s + 32202500*p**6*q*r**3*s - 2429765625*p**3*q**3*r**3*s - 4318609375*q**5*r**3*s + 148000000*p**4*q*r**4*s + 9902812500*p*q**3*r**4*s - 1755000000*p**2*q*r**5*s + 1154250*p**11*s**2 + 36821250*p**8*q**2*s**2 + 372825000*p**5*q**4*s**2 + 1170921875*p**2*q**6*s**2 - 38913750*p**9*r*s**2 - 797071875*p**6*q**2*r*s**2 - 2848984375*p**3*q**4*r*s**2 + 7651406250*q**6*r*s**2 + 415068750*p**7*r**2*s**2 + 3151328125*p**4*q**2*r**2*s**2 - 17696875000*p*q**4*r**2*s**2 - 725968750*p**5*r**3*s**2 + 5295312500*p**2*q**2*r**3*s**2 - 8581250000*p**3*r**4*s**2 - 812500000*q**2*r**4*s**2 + 30062500000*p*r**5*s**2 - 110109375*p**7*q*s**3 - 1976562500*p**4*q**3*s**3 - 6329296875*p*q**5*s**3 + 2256328125*p**5*q*r*s**3 + 8554687500*p**2*q**3*r*s**3 + 12947265625*p**3*q*r**2*s**3 + 7984375000*q**3*r**2*s**3 - 167039062500*p*q*r**3*s**3 + 1181250000*p**6*s**4 + 17873046875*p**3*q**2*s**4 - 20449218750*q**4*s**4 - 16265625000*p**4*r*s**4 + 260869140625*p*q**2*r*s**4 + 21025390625*p**2*r**2*s**4 + 207617187500*r**3*s**4 - 207177734375*p**2*q*s**5 - 615478515625*q*r*s**5 + 301513671875*p*s**6
b[3][2] = 53125*p**5*q**6 + 425000*p**2*q**8 - 394375*p**6*q**4*r - 4301875*p**3*q**6*r - 3225000*q**8*r + 851250*p**7*q**2*r**2 + 16910625*p**4*q**4*r**2 + 44210000*p*q**6*r**2 - 20474375*p**5*q**2*r**3 - 147190625*p**2*q**4*r**3 + 163975000*p**3*q**2*r**4 + 156812500*q**4*r**4 - 323750000*p*q**2*r**5 - 99375*p**7*q**3*s - 6395000*p**4*q**5*s - 49243750*p*q**7*s - 1164375*p**8*q*r*s + 4465625*p**5*q**3*r*s + 205546875*p**2*q**5*r*s + 12163750*p**6*q*r**2*s - 315546875*p**3*q**3*r**2*s - 946453125*q**5*r**2*s - 23500000*p**4*q*r**3*s + 2313437500*p*q**3*r**3*s - 472500000*p**2*q*r**4*s + 1316250*p**9*s**2 + 22715625*p**6*q**2*s**2 + 206953125*p**3*q**4*s**2 + 1220000000*q**6*s**2 - 20953125*p**7*r*s**2 - 277656250*p**4*q**2*r*s**2 - 3317187500*p*q**4*r*s**2 + 293734375*p**5*r**2*s**2 + 1351562500*p**2*q**2*r**2*s**2 - 2278125000*p**3*r**3*s**2 - 218750000*q**2*r**3*s**2 + 8093750000*p*r**4*s**2 - 9609375*p**5*q*s**3 + 240234375*p**2*q**3*s**3 + 2310546875*p**3*q*r*s**3 + 1171875000*q**3*r*s**3 - 33460937500*p*q*r**2*s**3 + 2185546875*p**4*s**4 + 32578125000*p*q**2*s**4 - 8544921875*p**2*r*s**4 + 58398437500*r**2*s**4 - 114013671875*q*s**5
b[3][1] = -16250*p**6*q**4 - 191875*p**3*q**6 - 495000*q**8 + 73125*p**7*q**2*r + 1437500*p**4*q**4*r + 5866250*p*q**6*r - 2043125*p**5*q**2*r**2 - 17218750*p**2*q**4*r**2 + 19106250*p**3*q**2*r**3 + 34015625*q**4*r**3 - 69375000*p*q**2*r**4 - 219375*p**8*q*s - 2846250*p**5*q**3*s - 8021875*p**2*q**5*s + 3420000*p**6*q*r*s - 1640625*p**3*q**3*r*s - 152468750*q**5*r*s + 3062500*p**4*q*r**2*s + 381171875*p*q**3*r**2*s - 101250000*p**2*q*r**3*s + 2784375*p**7*s**2 + 43515625*p**4*q**2*s**2 + 115625000*p*q**4*s**2 - 48140625*p**5*r*s**2 - 307421875*p**2*q**2*r*s**2 - 25781250*p**3*r**2*s**2 - 46875000*q**2*r**2*s**2 + 1734375000*p*r**3*s**2 - 128906250*p**3*q*s**3 + 339843750*q**3*s**3 - 4583984375*p*q*r*s**3 + 2236328125*p**2*s**4 + 12255859375*r*s**4
b[3][0] = 31875*p**4*q**4 + 255000*p*q**6 - 82500*p**5*q**2*r - 1106250*p**2*q**4*r + 1653125*p**3*q**2*r**2 + 5187500*q**4*r**2 - 11562500*p*q**2*r**3 - 118125*p**6*q*s - 3593750*p**3*q**3*s - 23812500*q**5*s + 4656250*p**4*q*r*s + 67109375*p*q**3*r*s - 16875000*p**2*q*r**2*s - 984375*p**5*s**2 - 19531250*p**2*q**2*s**2 - 37890625*p**3*r*s**2 - 7812500*q**2*r*s**2 + 289062500*p*r**2*s**2 - 529296875*p*q*s**3 + 2343750000*s**4
b[4][5] = 600*p**10*q**10 + 13850*p**7*q**12 + 106150*p**4*q**14 + 270000*p*q**16 - 9300*p**11*q**8*r - 234075*p**8*q**10*r - 1942825*p**5*q**12*r - 5319900*p**2*q**14*r + 52050*p**12*q**6*r**2 + 1481025*p**9*q**8*r**2 + 13594450*p**6*q**10*r**2 + 40062750*p**3*q**12*r**2 - 3569400*q**14*r**2 - 122175*p**13*q**4*r**3 - 4260350*p**10*q**6*r**3 - 45052375*p**7*q**8*r**3 - 142634900*p**4*q**10*r**3 + 54186350*p*q**12*r**3 + 97200*p**14*q**2*r**4 + 5284225*p**11*q**4*r**4 + 70389525*p**8*q**6*r**4 + 232732850*p**5*q**8*r**4 - 318849400*p**2*q**10*r**4 - 2046000*p**12*q**2*r**5 - 43874125*p**9*q**4*r**5 - 107411850*p**6*q**6*r**5 + 948310700*p**3*q**8*r**5 - 34763575*q**10*r**5 + 5915600*p**10*q**2*r**6 - 115887800*p**7*q**4*r**6 - 1649542400*p**4*q**6*r**6 + 224468875*p*q**8*r**6 + 120252800*p**8*q**2*r**7 + 1779902000*p**5*q**4*r**7 - 288250000*p**2*q**6*r**7 - 915200000*p**6*q**2*r**8 - 1164000000*p**3*q**4*r**8 - 444200000*q**6*r**8 + 2502400000*p**4*q**2*r**9 + 1984000000*p*q**4*r**9 - 2880000000*p**2*q**2*r**10 + 20700*p**12*q**7*s + 551475*p**9*q**9*s + 5194875*p**6*q**11*s + 18985000*p**3*q**13*s + 16875000*q**15*s - 218700*p**13*q**5*r*s - 6606475*p**10*q**7*r*s - 69770850*p**7*q**9*r*s - 285325500*p**4*q**11*r*s - 292005000*p*q**13*r*s + 694575*p**14*q**3*r**2*s + 26187750*p**11*q**5*r**2*s + 328992825*p**8*q**7*r**2*s + 1573292400*p**5*q**9*r**2*s + 1930043875*p**2*q**11*r**2*s - 583200*p**15*q*r**3*s - 37263225*p**12*q**3*r**3*s - 638579425*p**9*q**5*r**3*s - 3920212225*p**6*q**7*r**3*s - 6327336875*p**3*q**9*r**3*s + 440969375*q**11*r**3*s + 13446000*p**13*q*r**4*s + 462330325*p**10*q**3*r**4*s + 4509088275*p**7*q**5*r**4*s + 11709795625*p**4*q**7*r**4*s - 3579565625*p*q**9*r**4*s - 85033600*p**11*q*r**5*s - 2136801600*p**8*q**3*r**5*s - 12221575800*p**5*q**5*r**5*s + 9431044375*p**2*q**7*r**5*s + 10643200*p**9*q*r**6*s + 4565594000*p**6*q**3*r**6*s - 1778590000*p**3*q**5*r**6*s + 4842175000*q**7*r**6*s + 712320000*p**7*q*r**7*s - 16182000000*p**4*q**3*r**7*s - 21918000000*p*q**5*r**7*s - 742400000*p**5*q*r**8*s + 31040000000*p**2*q**3*r**8*s + 1280000000*p**3*q*r**9*s + 4800000000*q**3*r**9*s + 230850*p**14*q**4*s**2 + 7373250*p**11*q**6*s**2 + 85045625*p**8*q**8*s**2 + 399140625*p**5*q**10*s**2 + 565031250*p**2*q**12*s**2 - 1257525*p**15*q**2*r*s**2 - 52728975*p**12*q**4*r*s**2 - 743466375*p**9*q**6*r*s**2 - 4144915000*p**6*q**8*r*s**2 - 7102690625*p**3*q**10*r*s**2 - 1389937500*q**12*r*s**2 + 874800*p**16*r**2*s**2 + 89851275*p**13*q**2*r**2*s**2 + 1897236775*p**10*q**4*r**2*s**2 + 14144163000*p**7*q**6*r**2*s**2 + 31942921875*p**4*q**8*r**2*s**2 + 13305118750*p*q**10*r**2*s**2 - 23004000*p**14*r**3*s**2 - 1450715475*p**11*q**2*r**3*s**2 - 19427105000*p**8*q**4*r**3*s**2 - 70634028750*p**5*q**6*r**3*s**2 - 47854218750*p**2*q**8*r**3*s**2 + 204710400*p**12*r**4*s**2 + 10875135000*p**9*q**2*r**4*s**2 + 83618806250*p**6*q**4*r**4*s**2 + 62744500000*p**3*q**6*r**4*s**2 - 19806718750*q**8*r**4*s**2 - 757094800*p**10*r**5*s**2 - 37718030000*p**7*q**2*r**5*s**2 - 22479500000*p**4*q**4*r**5*s**2 + 91556093750*p*q**6*r**5*s**2 + 2306320000*p**8*r**6*s**2 + 55539600000*p**5*q**2*r**6*s**2 - 112851250000*p**2*q**4*r**6*s**2 - 10720000000*p**6*r**7*s**2 - 64720000000*p**3*q**2*r**7*s**2 - 59925000000*q**4*r**7*s**2 + 28000000000*p**4*r**8*s**2 + 28000000000*p*q**2*r**8*s**2 - 24000000000*p**2*r**9*s**2 + 820125*p**16*q*s**3 + 36804375*p**13*q**3*s**3 + 552225000*p**10*q**5*s**3 + 3357593750*p**7*q**7*s**3 + 7146562500*p**4*q**9*s**3 + 3851562500*p*q**11*s**3 - 92400750*p**14*q*r*s**3 - 
2350175625*p**11*q**3*r*s**3 - 19470640625*p**8*q**5*r*s**3 - 52820593750*p**5*q**7*r*s**3 - 45447734375*p**2*q**9*r*s**3 + 1824363000*p**12*q*r**2*s**3 + 31435234375*p**9*q**3*r**2*s**3 + 141717537500*p**6*q**5*r**2*s**3 + 228370781250*p**3*q**7*r**2*s**3 + 34610078125*q**9*r**2*s**3 - 17591825625*p**10*q*r**3*s**3 - 188927187500*p**7*q**3*r**3*s**3 - 502088984375*p**4*q**5*r**3*s**3 - 187849296875*p*q**7*r**3*s**3 + 75577750000*p**8*q*r**4*s**3 + 342800000000*p**5*q**3*r**4*s**3 + 295384296875*p**2*q**5*r**4*s**3 - 107681250000*p**6*q*r**5*s**3 + 53330000000*p**3*q**3*r**5*s**3 + 271586875000*q**5*r**5*s**3 - 26410000000*p**4*q*r**6*s**3 - 188200000000*p*q**3*r**6*s**3 + 92000000000*p**2*q*r**7*s**3 + 120000000000*q*r**8*s**3 + 47840625*p**15*s**4 + 1150453125*p**12*q**2*s**4 + 9229453125*p**9*q**4*s**4 + 24954687500*p**6*q**6*s**4 + 22978515625*p**3*q**8*s**4 + 1367187500*q**10*s**4 - 1193737500*p**13*r*s**4 - 20817843750*p**10*q**2*r*s**4 - 98640000000*p**7*q**4*r*s**4 - 225767187500*p**4*q**6*r*s**4 - 74707031250*p*q**8*r*s**4 + 13431318750*p**11*r**2*s**4 + 188709843750*p**8*q**2*r**2*s**4 + 875157656250*p**5*q**4*r**2*s**4 + 593812890625*p**2*q**6*r**2*s**4 - 69869296875*p**9*r**3*s**4 - 854811093750*p**6*q**2*r**3*s**4 - 1730658203125*p**3*q**4*r**3*s**4 - 570867187500*q**6*r**3*s**4 + 162075625000*p**7*r**4*s**4 + 1536375000000*p**4*q**2*r**4*s**4 + 765156250000*p*q**4*r**4*s**4 - 165988750000*p**5*r**5*s**4 - 728968750000*p**2*q**2*r**5*s**4 + 121500000000*p**3*r**6*s**4 - 1039375000000*q**2*r**6*s**4 - 100000000000*p*r**7*s**4 - 379687500*p**11*q*s**5 - 11607421875*p**8*q**3*s**5 - 20830078125*p**5*q**5*s**5 - 33691406250*p**2*q**7*s**5 - 41491406250*p**9*q*r*s**5 - 419054687500*p**6*q**3*r*s**5 - 129511718750*p**3*q**5*r*s**5 + 311767578125*q**7*r*s**5 + 620116015625*p**7*q*r**2*s**5 + 1154687500000*p**4*q**3*r**2*s**5 + 36455078125*p*q**5*r**2*s**5 - 2265953125000*p**5*q*r**3*s**5 - 1509521484375*p**2*q**3*r**3*s**5 + 2530468750000*p**3*q*r**4*s**5 + 3259765625000*q**3*r**4*s**5 + 93750000000*p*q*r**5*s**5 + 23730468750*p**10*s**6 + 243603515625*p**7*q**2*s**6 + 341552734375*p**4*q**4*s**6 - 12207031250*p*q**6*s**6 - 357099609375*p**8*r*s**6 - 298193359375*p**5*q**2*r*s**6 + 406738281250*p**2*q**4*r*s**6 + 1615683593750*p**6*r**2*s**6 + 558593750000*p**3*q**2*r**2*s**6 - 2811035156250*q**4*r**2*s**6 - 2960937500000*p**4*r**3*s**6 - 3802246093750*p*q**2*r**3*s**6 + 2347656250000*p**2*r**4*s**6 - 671875000000*r**5*s**6 - 651855468750*p**6*q*s**7 - 1458740234375*p**3*q**3*s**7 - 152587890625*q**5*s**7 + 1628417968750*p**4*q*r*s**7 + 3948974609375*p*q**3*r*s**7 - 916748046875*p**2*q*r**2*s**7 + 1611328125000*q*r**3*s**7 + 640869140625*p**5*s**8 + 1068115234375*p**2*q**2*s**8 - 2044677734375*p**3*r*s**8 - 3204345703125*q**2*r*s**8 + 1739501953125*p*r**2*s**8
b[4][4] = -600*p**11*q**8 - 14050*p**8*q**10 - 109100*p**5*q**12 - 280800*p**2*q**14 + 7200*p**12*q**6*r + 188700*p**9*q**8*r + 1621725*p**6*q**10*r + 4577075*p**3*q**12*r + 5400*q**14*r - 28350*p**13*q**4*r**2 - 910600*p**10*q**6*r**2 - 9237975*p**7*q**8*r**2 - 30718900*p**4*q**10*r**2 - 5575950*p*q**12*r**2 + 36450*p**14*q**2*r**3 + 1848125*p**11*q**4*r**3 + 25137775*p**8*q**6*r**3 + 109591450*p**5*q**8*r**3 + 70627650*p**2*q**10*r**3 - 1317150*p**12*q**2*r**4 - 32857100*p**9*q**4*r**4 - 219125575*p**6*q**6*r**4 - 327565875*p**3*q**8*r**4 - 13011875*q**10*r**4 + 16484150*p**10*q**2*r**5 + 222242250*p**7*q**4*r**5 + 642173750*p**4*q**6*r**5 + 101263750*p*q**8*r**5 - 79345000*p**8*q**2*r**6 - 433180000*p**5*q**4*r**6 - 93731250*p**2*q**6*r**6 - 74300000*p**6*q**2*r**7 - 1057900000*p**3*q**4*r**7 - 591175000*q**6*r**7 + 1891600000*p**4*q**2*r**8 + 2796000000*p*q**4*r**8 - 4320000000*p**2*q**2*r**9 - 16200*p**13*q**5*s - 359500*p**10*q**7*s - 2603825*p**7*q**9*s - 4590375*p**4*q**11*s + 12352500*p*q**13*s + 121500*p**14*q**3*r*s + 3227400*p**11*q**5*r*s + 27301725*p**8*q**7*r*s + 59480975*p**5*q**9*r*s - 137308875*p**2*q**11*r*s - 218700*p**15*q*r**2*s - 8903925*p**12*q**3*r**2*s - 100918225*p**9*q**5*r**2*s - 325291300*p**6*q**7*r**2*s + 365705000*p**3*q**9*r**2*s + 94342500*q**11*r**2*s + 7632900*p**13*q*r**3*s + 162995400*p**10*q**3*r**3*s + 974558975*p**7*q**5*r**3*s + 930991250*p**4*q**7*r**3*s - 495368750*p*q**9*r**3*s - 97344900*p**11*q*r**4*s - 1406739250*p**8*q**3*r**4*s - 5572526250*p**5*q**5*r**4*s - 1903987500*p**2*q**7*r**4*s + 678550000*p**9*q*r**5*s + 8176215000*p**6*q**3*r**5*s + 18082050000*p**3*q**5*r**5*s + 5435843750*q**7*r**5*s - 2979800000*p**7*q*r**6*s - 29163500000*p**4*q**3*r**6*s - 27417500000*p*q**5*r**6*s + 6282400000*p**5*q*r**7*s + 48690000000*p**2*q**3*r**7*s - 2880000000*p**3*q*r**8*s + 7200000000*q**3*r**8*s - 109350*p**15*q**2*s**2 - 2405700*p**12*q**4*s**2 - 16125250*p**9*q**6*s**2 - 4930000*p**6*q**8*s**2 + 201150000*p**3*q**10*s**2 - 243000000*q**12*s**2 + 328050*p**16*r*s**2 + 10552275*p**13*q**2*r*s**2 + 88019100*p**10*q**4*r*s**2 - 4208625*p**7*q**6*r*s**2 - 1920390625*p**4*q**8*r*s**2 + 1759537500*p*q**10*r*s**2 - 11955600*p**14*r**2*s**2 - 196375050*p**11*q**2*r**2*s**2 - 555196250*p**8*q**4*r**2*s**2 + 4213270000*p**5*q**6*r**2*s**2 - 157468750*p**2*q**8*r**2*s**2 + 162656100*p**12*r**3*s**2 + 1880870000*p**9*q**2*r**3*s**2 + 753684375*p**6*q**4*r**3*s**2 - 25423062500*p**3*q**6*r**3*s**2 - 14142031250*q**8*r**3*s**2 - 1251948750*p**10*r**4*s**2 - 12524475000*p**7*q**2*r**4*s**2 + 18067656250*p**4*q**4*r**4*s**2 + 60531875000*p*q**6*r**4*s**2 + 6827725000*p**8*r**5*s**2 + 57157000000*p**5*q**2*r**5*s**2 - 75844531250*p**2*q**4*r**5*s**2 - 24452500000*p**6*r**6*s**2 - 144950000000*p**3*q**2*r**6*s**2 - 82109375000*q**4*r**6*s**2 + 46950000000*p**4*r**7*s**2 + 60000000000*p*q**2*r**7*s**2 - 36000000000*p**2*r**8*s**2 + 1549125*p**14*q*s**3 + 51873750*p**11*q**3*s**3 + 599781250*p**8*q**5*s**3 + 2421156250*p**5*q**7*s**3 - 1693515625*p**2*q**9*s**3 - 104884875*p**12*q*r*s**3 - 1937437500*p**9*q**3*r*s**3 - 11461053125*p**6*q**5*r*s**3 + 10299375000*p**3*q**7*r*s**3 + 10551250000*q**9*r*s**3 + 1336263750*p**10*q*r**2*s**3 + 23737250000*p**7*q**3*r**2*s**3 + 57136718750*p**4*q**5*r**2*s**3 - 8288906250*p*q**7*r**2*s**3 - 10907218750*p**8*q*r**3*s**3 - 160615000000*p**5*q**3*r**3*s**3 - 111134687500*p**2*q**5*r**3*s**3 + 46743125000*p**6*q*r**4*s**3 + 570509375000*p**3*q**3*r**4*s**3 + 274839843750*q**5*r**4*s**3 - 73312500000*p**4*q*r**5*s**3 - 
145437500000*p*q**3*r**5*s**3 + 8750000000*p**2*q*r**6*s**3 + 180000000000*q*r**7*s**3 + 15946875*p**13*s**4 + 1265625*p**10*q**2*s**4 - 3282343750*p**7*q**4*s**4 - 38241406250*p**4*q**6*s**4 - 40136718750*p*q**8*s**4 - 113146875*p**11*r*s**4 - 2302734375*p**8*q**2*r*s**4 + 68450156250*p**5*q**4*r*s**4 + 177376562500*p**2*q**6*r*s**4 + 3164062500*p**9*r**2*s**4 + 14392890625*p**6*q**2*r**2*s**4 - 543781250000*p**3*q**4*r**2*s**4 - 319769531250*q**6*r**2*s**4 - 21048281250*p**7*r**3*s**4 - 240687500000*p**4*q**2*r**3*s**4 - 228164062500*p*q**4*r**3*s**4 + 23062500000*p**5*r**4*s**4 + 300410156250*p**2*q**2*r**4*s**4 + 93437500000*p**3*r**5*s**4 - 1141015625000*q**2*r**5*s**4 - 187500000000*p*r**6*s**4 + 1761328125*p**9*q*s**5 - 3177734375*p**6*q**3*s**5 + 60019531250*p**3*q**5*s**5 + 108398437500*q**7*s**5 + 24106640625*p**7*q*r*s**5 + 429589843750*p**4*q**3*r*s**5 + 410371093750*p*q**5*r*s**5 - 23582031250*p**5*q*r**2*s**5 + 202441406250*p**2*q**3*r**2*s**5 - 383203125000*p**3*q*r**3*s**5 + 2232910156250*q**3*r**3*s**5 + 1500000000000*p*q*r**4*s**5 - 13710937500*p**8*s**6 - 202832031250*p**5*q**2*s**6 - 531738281250*p**2*q**4*s**6 + 73330078125*p**6*r*s**6 - 3906250000*p**3*q**2*r*s**6 - 1275878906250*q**4*r*s**6 - 121093750000*p**4*r**2*s**6 - 3308593750000*p*q**2*r**2*s**6 + 18066406250*p**2*r**3*s**6 - 244140625000*r**4*s**6 + 327148437500*p**4*q*s**7 + 1672363281250*p*q**3*s**7 + 446777343750*p**2*q*r*s**7 + 1232910156250*q*r**2*s**7 - 274658203125*p**3*s**8 - 1068115234375*q**2*s**8 - 61035156250*p*r*s**8
b[4][3] = 200*p**9*q**8 + 7550*p**6*q**10 + 78650*p**3*q**12 + 248400*q**14 - 4800*p**10*q**6*r - 164300*p**7*q**8*r - 1709575*p**4*q**10*r - 5566500*p*q**12*r + 31050*p**11*q**4*r**2 + 1116175*p**8*q**6*r**2 + 12674650*p**5*q**8*r**2 + 45333850*p**2*q**10*r**2 - 60750*p**12*q**2*r**3 - 2872725*p**9*q**4*r**3 - 40403050*p**6*q**6*r**3 - 173564375*p**3*q**8*r**3 - 11242250*q**10*r**3 + 2174100*p**10*q**2*r**4 + 54010000*p**7*q**4*r**4 + 331074875*p**4*q**6*r**4 + 114173750*p*q**8*r**4 - 24858500*p**8*q**2*r**5 - 300875000*p**5*q**4*r**5 - 319430625*p**2*q**6*r**5 + 69810000*p**6*q**2*r**6 - 23900000*p**3*q**4*r**6 - 294662500*q**6*r**6 + 524200000*p**4*q**2*r**7 + 1432000000*p*q**4*r**7 - 2340000000*p**2*q**2*r**8 + 5400*p**11*q**5*s + 310400*p**8*q**7*s + 3591725*p**5*q**9*s + 11556750*p**2*q**11*s - 105300*p**12*q**3*r*s - 4234650*p**9*q**5*r*s - 49928875*p**6*q**7*r*s - 174078125*p**3*q**9*r*s + 18000000*q**11*r*s + 364500*p**13*q*r**2*s + 15763050*p**10*q**3*r**2*s + 220187400*p**7*q**5*r**2*s + 929609375*p**4*q**7*r**2*s - 43653125*p*q**9*r**2*s - 13427100*p**11*q*r**3*s - 346066250*p**8*q**3*r**3*s - 2287673375*p**5*q**5*r**3*s - 1403903125*p**2*q**7*r**3*s + 184586000*p**9*q*r**4*s + 2983460000*p**6*q**3*r**4*s + 8725818750*p**3*q**5*r**4*s + 2527734375*q**7*r**4*s - 1284480000*p**7*q*r**5*s - 13138250000*p**4*q**3*r**5*s - 14001625000*p*q**5*r**5*s + 4224800000*p**5*q*r**6*s + 27460000000*p**2*q**3*r**6*s - 3760000000*p**3*q*r**7*s + 3900000000*q**3*r**7*s + 36450*p**13*q**2*s**2 + 2765475*p**10*q**4*s**2 + 34027625*p**7*q**6*s**2 + 97375000*p**4*q**8*s**2 - 88275000*p*q**10*s**2 - 546750*p**14*r*s**2 - 21961125*p**11*q**2*r*s**2 - 273059375*p**8*q**4*r*s**2 - 761562500*p**5*q**6*r*s**2 + 1869656250*p**2*q**8*r*s**2 + 20545650*p**12*r**2*s**2 + 473934375*p**9*q**2*r**2*s**2 + 1758053125*p**6*q**4*r**2*s**2 - 8743359375*p**3*q**6*r**2*s**2 - 4154375000*q**8*r**2*s**2 - 296559000*p**10*r**3*s**2 - 4065056250*p**7*q**2*r**3*s**2 - 186328125*p**4*q**4*r**3*s**2 + 19419453125*p*q**6*r**3*s**2 + 2326262500*p**8*r**4*s**2 + 21189375000*p**5*q**2*r**4*s**2 - 26301953125*p**2*q**4*r**4*s**2 - 10513250000*p**6*r**5*s**2 - 69937500000*p**3*q**2*r**5*s**2 - 42257812500*q**4*r**5*s**2 + 23375000000*p**4*r**6*s**2 + 40750000000*p*q**2*r**6*s**2 - 19500000000*p**2*r**7*s**2 + 4009500*p**12*q*s**3 + 36140625*p**9*q**3*s**3 - 335459375*p**6*q**5*s**3 - 2695312500*p**3*q**7*s**3 - 1486250000*q**9*s**3 + 102515625*p**10*q*r*s**3 + 4006812500*p**7*q**3*r*s**3 + 27589609375*p**4*q**5*r*s**3 + 20195312500*p*q**7*r*s**3 - 2792812500*p**8*q*r**2*s**3 - 44115156250*p**5*q**3*r**2*s**3 - 72609453125*p**2*q**5*r**2*s**3 + 18752500000*p**6*q*r**3*s**3 + 218140625000*p**3*q**3*r**3*s**3 + 109940234375*q**5*r**3*s**3 - 21893750000*p**4*q*r**4*s**3 - 65187500000*p*q**3*r**4*s**3 - 31000000000*p**2*q*r**5*s**3 + 97500000000*q*r**6*s**3 - 86568750*p**11*s**4 - 1955390625*p**8*q**2*s**4 - 8960781250*p**5*q**4*s**4 - 1357812500*p**2*q**6*s**4 + 1657968750*p**9*r*s**4 + 10467187500*p**6*q**2*r*s**4 - 55292968750*p**3*q**4*r*s**4 - 60683593750*q**6*r*s**4 - 11473593750*p**7*r**2*s**4 - 123281250000*p**4*q**2*r**2*s**4 - 164912109375*p*q**4*r**2*s**4 + 13150000000*p**5*r**3*s**4 + 190751953125*p**2*q**2*r**3*s**4 + 61875000000*p**3*r**4*s**4 - 467773437500*q**2*r**4*s**4 - 118750000000*p*r**5*s**4 + 7583203125*p**7*q*s**5 + 54638671875*p**4*q**3*s**5 + 39423828125*p*q**5*s**5 + 32392578125*p**5*q*r*s**5 + 278515625000*p**2*q**3*r*s**5 - 298339843750*p**3*q*r**2*s**5 + 560791015625*q**3*r**2*s**5 + 
720703125000*p*q*r**3*s**5 - 19687500000*p**6*s**6 - 159667968750*p**3*q**2*s**6 - 72265625000*q**4*s**6 + 116699218750*p**4*r*s**6 - 924072265625*p*q**2*r*s**6 - 156005859375*p**2*r**2*s**6 - 112304687500*r**3*s**6 + 349121093750*p**2*q*s**7 + 396728515625*q*r*s**7 - 213623046875*p*s**8
b[4][2] = -600*p**10*q**6 - 18450*p**7*q**8 - 174000*p**4*q**10 - 518400*p*q**12 + 5400*p**11*q**4*r + 197550*p**8*q**6*r + 2147775*p**5*q**8*r + 7219800*p**2*q**10*r - 12150*p**12*q**2*r**2 - 662200*p**9*q**4*r**2 - 9274775*p**6*q**6*r**2 - 38330625*p**3*q**8*r**2 - 5508000*q**10*r**2 + 656550*p**10*q**2*r**3 + 16233750*p**7*q**4*r**3 + 97335875*p**4*q**6*r**3 + 58271250*p*q**8*r**3 - 9845500*p**8*q**2*r**4 - 119464375*p**5*q**4*r**4 - 194431875*p**2*q**6*r**4 + 49465000*p**6*q**2*r**5 + 166000000*p**3*q**4*r**5 - 80793750*q**6*r**5 + 54400000*p**4*q**2*r**6 + 377750000*p*q**4*r**6 - 630000000*p**2*q**2*r**7 - 16200*p**12*q**3*s - 459300*p**9*q**5*s - 4207225*p**6*q**7*s - 10827500*p**3*q**9*s + 13635000*q**11*s + 72900*p**13*q*r*s + 2877300*p**10*q**3*r*s + 33239700*p**7*q**5*r*s + 107080625*p**4*q**7*r*s - 114975000*p*q**9*r*s - 3601800*p**11*q*r**2*s - 75214375*p**8*q**3*r**2*s - 387073250*p**5*q**5*r**2*s + 55540625*p**2*q**7*r**2*s + 53793000*p**9*q*r**3*s + 687176875*p**6*q**3*r**3*s + 1670018750*p**3*q**5*r**3*s + 665234375*q**7*r**3*s - 391570000*p**7*q*r**4*s - 3420125000*p**4*q**3*r**4*s - 3609625000*p*q**5*r**4*s + 1365600000*p**5*q*r**5*s + 7236250000*p**2*q**3*r**5*s - 1220000000*p**3*q*r**6*s + 1050000000*q**3*r**6*s - 109350*p**14*s**2 - 3065850*p**11*q**2*s**2 - 26908125*p**8*q**4*s**2 - 44606875*p**5*q**6*s**2 + 269812500*p**2*q**8*s**2 + 5200200*p**12*r*s**2 + 81826875*p**9*q**2*r*s**2 + 155378125*p**6*q**4*r*s**2 - 1936203125*p**3*q**6*r*s**2 - 998437500*q**8*r*s**2 - 77145750*p**10*r**2*s**2 - 745528125*p**7*q**2*r**2*s**2 + 683437500*p**4*q**4*r**2*s**2 + 4083359375*p*q**6*r**2*s**2 + 593287500*p**8*r**3*s**2 + 4799375000*p**5*q**2*r**3*s**2 - 4167578125*p**2*q**4*r**3*s**2 - 2731125000*p**6*r**4*s**2 - 18668750000*p**3*q**2*r**4*s**2 - 10480468750*q**4*r**4*s**2 + 6200000000*p**4*r**5*s**2 + 11750000000*p*q**2*r**5*s**2 - 5250000000*p**2*r**6*s**2 + 26527500*p**10*q*s**3 + 526031250*p**7*q**3*s**3 + 3160703125*p**4*q**5*s**3 + 2650312500*p*q**7*s**3 - 448031250*p**8*q*r*s**3 - 6682968750*p**5*q**3*r*s**3 - 11642812500*p**2*q**5*r*s**3 + 2553203125*p**6*q*r**2*s**3 + 37234375000*p**3*q**3*r**2*s**3 + 21871484375*q**5*r**2*s**3 + 2803125000*p**4*q*r**3*s**3 - 10796875000*p*q**3*r**3*s**3 - 16656250000*p**2*q*r**4*s**3 + 26250000000*q*r**5*s**3 - 75937500*p**9*s**4 - 704062500*p**6*q**2*s**4 - 8363281250*p**3*q**4*s**4 - 10398437500*q**6*s**4 + 197578125*p**7*r*s**4 - 16441406250*p**4*q**2*r*s**4 - 24277343750*p*q**4*r*s**4 - 5716015625*p**5*r**2*s**4 + 31728515625*p**2*q**2*r**2*s**4 + 27031250000*p**3*r**3*s**4 - 92285156250*q**2*r**3*s**4 - 33593750000*p*r**4*s**4 + 10394531250*p**5*q*s**5 + 38037109375*p**2*q**3*s**5 - 48144531250*p**3*q*r*s**5 + 74462890625*q**3*r*s**5 + 121093750000*p*q*r**2*s**5 - 2197265625*p**4*s**6 - 92529296875*p*q**2*s**6 + 15380859375*p**2*r*s**6 - 31738281250*r**2*s**6 + 54931640625*q*s**7
b[4][1] = 200*p**8*q**6 + 2950*p**5*q**8 + 10800*p**2*q**10 - 1800*p**9*q**4*r - 49650*p**6*q**6*r - 403375*p**3*q**8*r - 999000*q**10*r + 4050*p**10*q**2*r**2 + 236625*p**7*q**4*r**2 + 3109500*p**4*q**6*r**2 + 11463750*p*q**8*r**2 - 331500*p**8*q**2*r**3 - 7818125*p**5*q**4*r**3 - 41411250*p**2*q**6*r**3 + 4782500*p**6*q**2*r**4 + 47475000*p**3*q**4*r**4 - 16728125*q**6*r**4 - 8700000*p**4*q**2*r**5 + 81750000*p*q**4*r**5 - 135000000*p**2*q**2*r**6 + 5400*p**10*q**3*s + 144200*p**7*q**5*s + 939375*p**4*q**7*s + 1012500*p*q**9*s - 24300*p**11*q*r*s - 1169250*p**8*q**3*r*s - 14027250*p**5*q**5*r*s - 44446875*p**2*q**7*r*s + 2011500*p**9*q*r**2*s + 49330625*p**6*q**3*r**2*s + 272009375*p**3*q**5*r**2*s + 104062500*q**7*r**2*s - 34660000*p**7*q*r**3*s - 455062500*p**4*q**3*r**3*s - 625906250*p*q**5*r**3*s + 210200000*p**5*q*r**4*s + 1298750000*p**2*q**3*r**4*s - 240000000*p**3*q*r**5*s + 225000000*q**3*r**5*s + 36450*p**12*s**2 + 1231875*p**9*q**2*s**2 + 10712500*p**6*q**4*s**2 + 21718750*p**3*q**6*s**2 + 16875000*q**8*s**2 - 2814750*p**10*r*s**2 - 67612500*p**7*q**2*r*s**2 - 345156250*p**4*q**4*r*s**2 - 283125000*p*q**6*r*s**2 + 51300000*p**8*r**2*s**2 + 734531250*p**5*q**2*r**2*s**2 + 1267187500*p**2*q**4*r**2*s**2 - 384312500*p**6*r**3*s**2 - 3912500000*p**3*q**2*r**3*s**2 - 1822265625*q**4*r**3*s**2 + 1112500000*p**4*r**4*s**2 + 2437500000*p*q**2*r**4*s**2 - 1125000000*p**2*r**5*s**2 - 72578125*p**5*q**3*s**3 - 189296875*p**2*q**5*s**3 + 127265625*p**6*q*r*s**3 + 1415625000*p**3*q**3*r*s**3 + 1229687500*q**5*r*s**3 + 1448437500*p**4*q*r**2*s**3 + 2218750000*p*q**3*r**2*s**3 - 4031250000*p**2*q*r**3*s**3 + 5625000000*q*r**4*s**3 - 132890625*p**7*s**4 - 529296875*p**4*q**2*s**4 - 175781250*p*q**4*s**4 - 401953125*p**5*r*s**4 - 4482421875*p**2*q**2*r*s**4 + 4140625000*p**3*r**2*s**4 - 10498046875*q**2*r**2*s**4 - 7031250000*p*r**3*s**4 + 1220703125*p**3*q*s**5 + 1953125000*q**3*s**5 + 14160156250*p*q*r*s**5 - 1708984375*p**2*s**6 - 3662109375*r*s**6
b[4][0] = -4600*p**6*q**6 - 67850*p**3*q**8 - 248400*q**10 + 38900*p**7*q**4*r + 679575*p**4*q**6*r + 2866500*p*q**8*r - 81900*p**8*q**2*r**2 - 2009750*p**5*q**4*r**2 - 10783750*p**2*q**6*r**2 + 1478750*p**6*q**2*r**3 + 14165625*p**3*q**4*r**3 - 2743750*q**6*r**3 - 5450000*p**4*q**2*r**4 + 12687500*p*q**4*r**4 - 22500000*p**2*q**2*r**5 - 101700*p**8*q**3*s - 1700975*p**5*q**5*s - 7061250*p**2*q**7*s + 423900*p**9*q*r*s + 9292375*p**6*q**3*r*s + 50438750*p**3*q**5*r*s + 20475000*q**7*r*s - 7852500*p**7*q*r**2*s - 87765625*p**4*q**3*r**2*s - 121609375*p*q**5*r**2*s + 47700000*p**5*q*r**3*s + 264687500*p**2*q**3*r**3*s - 65000000*p**3*q*r**4*s + 37500000*q**3*r**4*s - 534600*p**10*s**2 - 10344375*p**7*q**2*s**2 - 54859375*p**4*q**4*s**2 - 40312500*p*q**6*s**2 + 10158750*p**8*r*s**2 + 117778125*p**5*q**2*r*s**2 + 192421875*p**2*q**4*r*s**2 - 70593750*p**6*r**2*s**2 - 685312500*p**3*q**2*r**2*s**2 - 334375000*q**4*r**2*s**2 + 193750000*p**4*r**3*s**2 + 500000000*p*q**2*r**3*s**2 - 187500000*p**2*r**4*s**2 + 8437500*p**6*q*s**3 + 159218750*p**3*q**3*s**3 + 220625000*q**5*s**3 + 353828125*p**4*q*r*s**3 + 412500000*p*q**3*r*s**3 - 1023437500*p**2*q*r**2*s**3 + 937500000*q*r**3*s**3 - 206015625*p**5*s**4 - 701171875*p**2*q**2*s**4 + 998046875*p**3*r*s**4 - 1308593750*q**2*r*s**4 - 1367187500*p*r**2*s**4 + 1708984375*p*q*s**5 - 976562500*s**6
return b
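    # The `o`, `a` and `c` properties below follow the same pattern as `b`:
    # each access rebuilds (no caching, as written here) a six-entry list whose
    # items are very large, presumably machine-generated closed-form
    # polynomials in self.p, self.q, self.r and self.s.  These tables appear to
    # be the auxiliary coefficient data of a Dummit-style solvable-quintic
    # construction (cf. D. S. Dummit, "Solving Solvable Quintics", Math. Comp.
    # 57 (1991)); treat that identification as an inference from the
    # surrounding code rather than something stated here.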
@property
def o(self):
p, q, r, s = self.p, self.q, self.r, self.s
o = [0]*6
o[5] = -1600*p**10*q**10 - 23600*p**7*q**12 - 86400*p**4*q**14 + 24800*p**11*q**8*r + 419200*p**8*q**10*r + 1850450*p**5*q**12*r + 896400*p**2*q**14*r - 138800*p**12*q**6*r**2 - 2921900*p**9*q**8*r**2 - 17295200*p**6*q**10*r**2 - 27127750*p**3*q**12*r**2 - 26076600*q**14*r**2 + 325800*p**13*q**4*r**3 + 9993850*p**10*q**6*r**3 + 88010500*p**7*q**8*r**3 + 274047650*p**4*q**10*r**3 + 410171400*p*q**12*r**3 - 259200*p**14*q**2*r**4 - 17147100*p**11*q**4*r**4 - 254289150*p**8*q**6*r**4 - 1318548225*p**5*q**8*r**4 - 2633598475*p**2*q**10*r**4 + 12636000*p**12*q**2*r**5 + 388911000*p**9*q**4*r**5 + 3269704725*p**6*q**6*r**5 + 8791192300*p**3*q**8*r**5 + 93560575*q**10*r**5 - 228361600*p**10*q**2*r**6 - 3951199200*p**7*q**4*r**6 - 16276981100*p**4*q**6*r**6 - 1597227000*p*q**8*r**6 + 1947899200*p**8*q**2*r**7 + 17037648000*p**5*q**4*r**7 + 8919740000*p**2*q**6*r**7 - 7672160000*p**6*q**2*r**8 - 15496000000*p**3*q**4*r**8 + 4224000000*q**6*r**8 + 9968000000*p**4*q**2*r**9 - 8640000000*p*q**4*r**9 + 4800000000*p**2*q**2*r**10 - 55200*p**12*q**7*s - 685600*p**9*q**9*s + 1028250*p**6*q**11*s + 37650000*p**3*q**13*s + 111375000*q**15*s + 583200*p**13*q**5*r*s + 9075600*p**10*q**7*r*s - 883150*p**7*q**9*r*s - 506830750*p**4*q**11*r*s - 1793137500*p*q**13*r*s - 1852200*p**14*q**3*r**2*s - 41435250*p**11*q**5*r**2*s - 80566700*p**8*q**7*r**2*s + 2485673600*p**5*q**9*r**2*s + 11442286125*p**2*q**11*r**2*s + 1555200*p**15*q*r**3*s + 80846100*p**12*q**3*r**3*s + 564906800*p**9*q**5*r**3*s - 4493012400*p**6*q**7*r**3*s - 35492391250*p**3*q**9*r**3*s - 789931875*q**11*r**3*s - 71766000*p**13*q*r**4*s - 1551149200*p**10*q**3*r**4*s - 1773437900*p**7*q**5*r**4*s + 51957593125*p**4*q**7*r**4*s + 14964765625*p*q**9*r**4*s + 1231569600*p**11*q*r**5*s + 12042977600*p**8*q**3*r**5*s - 27151011200*p**5*q**5*r**5*s - 88080610000*p**2*q**7*r**5*s - 9912995200*p**9*q*r**6*s - 29448104000*p**6*q**3*r**6*s + 144954840000*p**3*q**5*r**6*s - 44601300000*q**7*r**6*s + 35453760000*p**7*q*r**7*s - 63264000000*p**4*q**3*r**7*s + 60544000000*p*q**5*r**7*s - 30048000000*p**5*q*r**8*s + 37040000000*p**2*q**3*r**8*s - 60800000000*p**3*q*r**9*s - 48000000000*q**3*r**9*s - 615600*p**14*q**4*s**2 - 10524500*p**11*q**6*s**2 - 33831250*p**8*q**8*s**2 + 222806250*p**5*q**10*s**2 + 1099687500*p**2*q**12*s**2 + 3353400*p**15*q**2*r*s**2 + 74269350*p**12*q**4*r*s**2 + 276445750*p**9*q**6*r*s**2 - 2618600000*p**6*q**8*r*s**2 - 14473243750*p**3*q**10*r*s**2 + 1383750000*q**12*r*s**2 - 2332800*p**16*r**2*s**2 - 132750900*p**13*q**2*r**2*s**2 - 900775150*p**10*q**4*r**2*s**2 + 8249244500*p**7*q**6*r**2*s**2 + 59525796875*p**4*q**8*r**2*s**2 - 40292868750*p*q**10*r**2*s**2 + 128304000*p**14*r**3*s**2 + 3160232100*p**11*q**2*r**3*s**2 + 8329580000*p**8*q**4*r**3*s**2 - 45558458750*p**5*q**6*r**3*s**2 + 297252890625*p**2*q**8*r**3*s**2 - 2769854400*p**12*r**4*s**2 - 37065970000*p**9*q**2*r**4*s**2 - 90812546875*p**6*q**4*r**4*s**2 - 627902000000*p**3*q**6*r**4*s**2 + 181347421875*q**8*r**4*s**2 + 30946932800*p**10*r**5*s**2 + 249954680000*p**7*q**2*r**5*s**2 + 802954812500*p**4*q**4*r**5*s**2 - 80900000000*p*q**6*r**5*s**2 - 192137320000*p**8*r**6*s**2 - 932641600000*p**5*q**2*r**6*s**2 - 943242500000*p**2*q**4*r**6*s**2 + 658412000000*p**6*r**7*s**2 + 1930720000000*p**3*q**2*r**7*s**2 + 593800000000*q**4*r**7*s**2 - 1162800000000*p**4*r**8*s**2 - 280000000000*p*q**2*r**8*s**2 + 840000000000*p**2*r**9*s**2 - 2187000*p**16*q*s**3 - 47418750*p**13*q**3*s**3 - 180618750*p**10*q**5*s**3 + 2231250000*p**7*q**7*s**3 + 17857734375*p**4*q**9*s**3 + 
29882812500*p*q**11*s**3 + 24664500*p**14*q*r*s**3 - 853368750*p**11*q**3*r*s**3 - 25939693750*p**8*q**5*r*s**3 - 177541562500*p**5*q**7*r*s**3 - 297978828125*p**2*q**9*r*s**3 - 153468000*p**12*q*r**2*s**3 + 30188125000*p**9*q**3*r**2*s**3 + 344049821875*p**6*q**5*r**2*s**3 + 534026875000*p**3*q**7*r**2*s**3 - 340726484375*q**9*r**2*s**3 - 9056190000*p**10*q*r**3*s**3 - 322314687500*p**7*q**3*r**3*s**3 - 769632109375*p**4*q**5*r**3*s**3 - 83276875000*p*q**7*r**3*s**3 + 164061000000*p**8*q*r**4*s**3 + 1381358750000*p**5*q**3*r**4*s**3 + 3088020000000*p**2*q**5*r**4*s**3 - 1267655000000*p**6*q*r**5*s**3 - 7642630000000*p**3*q**3*r**5*s**3 - 2759877500000*q**5*r**5*s**3 + 4597760000000*p**4*q*r**6*s**3 + 1846200000000*p*q**3*r**6*s**3 - 7006000000000*p**2*q*r**7*s**3 - 1200000000000*q*r**8*s**3 + 18225000*p**15*s**4 + 1328906250*p**12*q**2*s**4 + 24729140625*p**9*q**4*s**4 + 169467187500*p**6*q**6*s**4 + 413281250000*p**3*q**8*s**4 + 223828125000*q**10*s**4 + 710775000*p**13*r*s**4 - 18611015625*p**10*q**2*r*s**4 - 314344375000*p**7*q**4*r*s**4 - 828439843750*p**4*q**6*r*s**4 + 460937500000*p*q**8*r*s**4 - 25674975000*p**11*r**2*s**4 - 52223515625*p**8*q**2*r**2*s**4 - 387160000000*p**5*q**4*r**2*s**4 - 4733680078125*p**2*q**6*r**2*s**4 + 343911875000*p**9*r**3*s**4 + 3328658359375*p**6*q**2*r**3*s**4 + 16532406250000*p**3*q**4*r**3*s**4 + 5980613281250*q**6*r**3*s**4 - 2295497500000*p**7*r**4*s**4 - 14809820312500*p**4*q**2*r**4*s**4 - 6491406250000*p*q**4*r**4*s**4 + 7768470000000*p**5*r**5*s**4 + 34192562500000*p**2*q**2*r**5*s**4 - 11859000000000*p**3*r**6*s**4 + 10530000000000*q**2*r**6*s**4 + 6000000000000*p*r**7*s**4 + 11453906250*p**11*q*s**5 + 149765625000*p**8*q**3*s**5 + 545537109375*p**5*q**5*s**5 + 527343750000*p**2*q**7*s**5 - 371313281250*p**9*q*r*s**5 - 3461455078125*p**6*q**3*r*s**5 - 7920878906250*p**3*q**5*r*s**5 - 4747314453125*q**7*r*s**5 + 2417815625000*p**7*q*r**2*s**5 + 5465576171875*p**4*q**3*r**2*s**5 + 5937128906250*p*q**5*r**2*s**5 - 10661156250000*p**5*q*r**3*s**5 - 63574218750000*p**2*q**3*r**3*s**5 + 24059375000000*p**3*q*r**4*s**5 - 33023437500000*q**3*r**4*s**5 - 43125000000000*p*q*r**5*s**5 + 94394531250*p**10*s**6 + 1097167968750*p**7*q**2*s**6 + 2829833984375*p**4*q**4*s**6 - 1525878906250*p*q**6*s**6 + 2724609375*p**8*r*s**6 + 13998535156250*p**5*q**2*r*s**6 + 57094482421875*p**2*q**4*r*s**6 - 8512509765625*p**6*r**2*s**6 - 37941406250000*p**3*q**2*r**2*s**6 + 33191894531250*q**4*r**2*s**6 + 50534179687500*p**4*r**3*s**6 + 156656250000000*p*q**2*r**3*s**6 - 85023437500000*p**2*r**4*s**6 + 10125000000000*r**5*s**6 - 2717285156250*p**6*q*s**7 - 11352539062500*p**3*q**3*s**7 - 2593994140625*q**5*s**7 - 47154541015625*p**4*q*r*s**7 - 160644531250000*p*q**3*r*s**7 + 142500000000000*p**2*q*r**2*s**7 - 26757812500000*q*r**3*s**7 - 4364013671875*p**5*s**8 - 94604492187500*p**2*q**2*s**8 + 114379882812500*p**3*r*s**8 + 51116943359375*q**2*r*s**8 - 346435546875000*p*r**2*s**8 + 476837158203125*p*q*s**9 - 476837158203125*s**10
o[4] = 1600*p**11*q**8 + 20800*p**8*q**10 + 45100*p**5*q**12 - 151200*p**2*q**14 - 19200*p**12*q**6*r - 293200*p**9*q**8*r - 794600*p**6*q**10*r + 2634675*p**3*q**12*r + 2640600*q**14*r + 75600*p**13*q**4*r**2 + 1529100*p**10*q**6*r**2 + 6233350*p**7*q**8*r**2 - 12013350*p**4*q**10*r**2 - 29069550*p*q**12*r**2 - 97200*p**14*q**2*r**3 - 3562500*p**11*q**4*r**3 - 26984900*p**8*q**6*r**3 - 15900325*p**5*q**8*r**3 + 76267100*p**2*q**10*r**3 + 3272400*p**12*q**2*r**4 + 59486850*p**9*q**4*r**4 + 221270075*p**6*q**6*r**4 + 74065250*p**3*q**8*r**4 - 300564375*q**10*r**4 - 45569400*p**10*q**2*r**5 - 438666000*p**7*q**4*r**5 - 444821250*p**4*q**6*r**5 + 2448256250*p*q**8*r**5 + 290640000*p**8*q**2*r**6 + 855850000*p**5*q**4*r**6 - 5741875000*p**2*q**6*r**6 - 644000000*p**6*q**2*r**7 + 5574000000*p**3*q**4*r**7 + 4643000000*q**6*r**7 - 1696000000*p**4*q**2*r**8 - 12660000000*p*q**4*r**8 + 7200000000*p**2*q**2*r**9 + 43200*p**13*q**5*s + 572000*p**10*q**7*s - 59800*p**7*q**9*s - 24174625*p**4*q**11*s - 74587500*p*q**13*s - 324000*p**14*q**3*r*s - 5531400*p**11*q**5*r*s - 3712100*p**8*q**7*r*s + 293009275*p**5*q**9*r*s + 1115548875*p**2*q**11*r*s + 583200*p**15*q*r**2*s + 18343800*p**12*q**3*r**2*s + 77911100*p**9*q**5*r**2*s - 957488825*p**6*q**7*r**2*s - 5449661250*p**3*q**9*r**2*s + 960120000*q**11*r**2*s - 23684400*p**13*q*r**3*s - 373761900*p**10*q**3*r**3*s - 27944975*p**7*q**5*r**3*s + 10375740625*p**4*q**7*r**3*s - 4649093750*p*q**9*r**3*s + 395816400*p**11*q*r**4*s + 2910968000*p**8*q**3*r**4*s - 9126162500*p**5*q**5*r**4*s - 11696118750*p**2*q**7*r**4*s - 3028640000*p**9*q*r**5*s - 3251550000*p**6*q**3*r**5*s + 47914250000*p**3*q**5*r**5*s - 30255625000*q**7*r**5*s + 9304000000*p**7*q*r**6*s - 42970000000*p**4*q**3*r**6*s + 31475000000*p*q**5*r**6*s + 2176000000*p**5*q*r**7*s + 62100000000*p**2*q**3*r**7*s - 43200000000*p**3*q*r**8*s - 72000000000*q**3*r**8*s + 291600*p**15*q**2*s**2 + 2702700*p**12*q**4*s**2 - 38692250*p**9*q**6*s**2 - 538903125*p**6*q**8*s**2 - 1613112500*p**3*q**10*s**2 + 320625000*q**12*s**2 - 874800*p**16*r*s**2 - 14166900*p**13*q**2*r*s**2 + 193284900*p**10*q**4*r*s**2 + 3688520500*p**7*q**6*r*s**2 + 11613390625*p**4*q**8*r*s**2 - 15609881250*p*q**10*r*s**2 + 44031600*p**14*r**2*s**2 + 482345550*p**11*q**2*r**2*s**2 - 2020881875*p**8*q**4*r**2*s**2 - 7407026250*p**5*q**6*r**2*s**2 + 136175750000*p**2*q**8*r**2*s**2 - 1000884600*p**12*r**3*s**2 - 8888950000*p**9*q**2*r**3*s**2 - 30101703125*p**6*q**4*r**3*s**2 - 319761000000*p**3*q**6*r**3*s**2 + 51519218750*q**8*r**3*s**2 + 12622395000*p**10*r**4*s**2 + 97032450000*p**7*q**2*r**4*s**2 + 469929218750*p**4*q**4*r**4*s**2 + 291342187500*p*q**6*r**4*s**2 - 96382000000*p**8*r**5*s**2 - 598070000000*p**5*q**2*r**5*s**2 - 1165021875000*p**2*q**4*r**5*s**2 + 446500000000*p**6*r**6*s**2 + 1651500000000*p**3*q**2*r**6*s**2 + 789375000000*q**4*r**6*s**2 - 1152000000000*p**4*r**7*s**2 - 600000000000*p*q**2*r**7*s**2 + 1260000000000*p**2*r**8*s**2 - 24786000*p**14*q*s**3 - 660487500*p**11*q**3*s**3 - 5886356250*p**8*q**5*s**3 - 18137187500*p**5*q**7*s**3 - 5120546875*p**2*q**9*s**3 + 827658000*p**12*q*r*s**3 + 13343062500*p**9*q**3*r*s**3 + 39782068750*p**6*q**5*r*s**3 - 111288437500*p**3*q**7*r*s**3 - 15438750000*q**9*r*s**3 - 14540782500*p**10*q*r**2*s**3 - 135889750000*p**7*q**3*r**2*s**3 - 176892578125*p**4*q**5*r**2*s**3 - 934462656250*p*q**7*r**2*s**3 + 171669250000*p**8*q*r**3*s**3 + 1164538125000*p**5*q**3*r**3*s**3 + 3192346406250*p**2*q**5*r**3*s**3 - 1295476250000*p**6*q*r**4*s**3 - 6540712500000*p**3*q**3*r**4*s**3 - 
2957828125000*q**5*r**4*s**3 + 5366750000000*p**4*q*r**5*s**3 + 3165000000000*p*q**3*r**5*s**3 - 8862500000000*p**2*q*r**6*s**3 - 1800000000000*q*r**7*s**3 + 236925000*p**13*s**4 + 8895234375*p**10*q**2*s**4 + 106180781250*p**7*q**4*s**4 + 474221875000*p**4*q**6*s**4 + 616210937500*p*q**8*s**4 - 6995868750*p**11*r*s**4 - 184190625000*p**8*q**2*r*s**4 - 1299254453125*p**5*q**4*r*s**4 - 2475458593750*p**2*q**6*r*s**4 + 63049218750*p**9*r**2*s**4 + 1646791484375*p**6*q**2*r**2*s**4 + 9086886718750*p**3*q**4*r**2*s**4 + 4673421875000*q**6*r**2*s**4 - 215665000000*p**7*r**3*s**4 - 7864589843750*p**4*q**2*r**3*s**4 - 5987890625000*p*q**4*r**3*s**4 + 594843750000*p**5*r**4*s**4 + 27791171875000*p**2*q**2*r**4*s**4 - 3881250000000*p**3*r**5*s**4 + 12203125000000*q**2*r**5*s**4 + 10312500000000*p*r**6*s**4 - 34720312500*p**9*q*s**5 - 545126953125*p**6*q**3*s**5 - 2176425781250*p**3*q**5*s**5 - 2792968750000*q**7*s**5 - 1395703125*p**7*q*r*s**5 - 1957568359375*p**4*q**3*r*s**5 + 5122636718750*p*q**5*r*s**5 + 858210937500*p**5*q*r**2*s**5 - 42050097656250*p**2*q**3*r**2*s**5 + 7088281250000*p**3*q*r**3*s**5 - 25974609375000*q**3*r**3*s**5 - 69296875000000*p*q*r**4*s**5 + 384697265625*p**8*s**6 + 6403320312500*p**5*q**2*s**6 + 16742675781250*p**2*q**4*s**6 - 3467080078125*p**6*r*s**6 + 11009765625000*p**3*q**2*r*s**6 + 16451660156250*q**4*r*s**6 + 6979003906250*p**4*r**2*s**6 + 145403320312500*p*q**2*r**2*s**6 + 4076171875000*p**2*r**3*s**6 + 22265625000000*r**4*s**6 - 21915283203125*p**4*q*s**7 - 86608886718750*p*q**3*s**7 - 22785644531250*p**2*q*r*s**7 - 103466796875000*q*r**2*s**7 + 18798828125000*p**3*s**8 + 106048583984375*q**2*s**8 + 17761230468750*p*r*s**8
o[3] = 2800*p**9*q**8 + 55700*p**6*q**10 + 363600*p**3*q**12 + 777600*q**14 - 27200*p**10*q**6*r - 700200*p**7*q**8*r - 5726550*p**4*q**10*r - 15066000*p*q**12*r + 74700*p**11*q**4*r**2 + 2859575*p**8*q**6*r**2 + 31175725*p**5*q**8*r**2 + 103147650*p**2*q**10*r**2 - 40500*p**12*q**2*r**3 - 4274400*p**9*q**4*r**3 - 76065825*p**6*q**6*r**3 - 365623750*p**3*q**8*r**3 - 132264000*q**10*r**3 + 2192400*p**10*q**2*r**4 + 92562500*p**7*q**4*r**4 + 799193875*p**4*q**6*r**4 + 1188193125*p*q**8*r**4 - 41231500*p**8*q**2*r**5 - 914210000*p**5*q**4*r**5 - 3318853125*p**2*q**6*r**5 + 398850000*p**6*q**2*r**6 + 3944000000*p**3*q**4*r**6 + 2211312500*q**6*r**6 - 1817000000*p**4*q**2*r**7 - 6720000000*p*q**4*r**7 + 3900000000*p**2*q**2*r**8 + 75600*p**11*q**5*s + 1823100*p**8*q**7*s + 14534150*p**5*q**9*s + 38265750*p**2*q**11*s - 394200*p**12*q**3*r*s - 11453850*p**9*q**5*r*s - 101213000*p**6*q**7*r*s - 223565625*p**3*q**9*r*s + 415125000*q**11*r*s + 243000*p**13*q*r**2*s + 13654575*p**10*q**3*r**2*s + 163811725*p**7*q**5*r**2*s + 173461250*p**4*q**7*r**2*s - 3008671875*p*q**9*r**2*s - 2016900*p**11*q*r**3*s - 86576250*p**8*q**3*r**3*s - 324146625*p**5*q**5*r**3*s + 3378506250*p**2*q**7*r**3*s - 89211000*p**9*q*r**4*s - 55207500*p**6*q**3*r**4*s + 1493950000*p**3*q**5*r**4*s - 12573609375*q**7*r**4*s + 1140100000*p**7*q*r**5*s + 42500000*p**4*q**3*r**5*s + 21511250000*p*q**5*r**5*s - 4058000000*p**5*q*r**6*s + 6725000000*p**2*q**3*r**6*s - 1400000000*p**3*q*r**7*s - 39000000000*q**3*r**7*s + 510300*p**13*q**2*s**2 + 4814775*p**10*q**4*s**2 - 70265125*p**7*q**6*s**2 - 1016484375*p**4*q**8*s**2 - 3221100000*p*q**10*s**2 - 364500*p**14*r*s**2 + 30314250*p**11*q**2*r*s**2 + 1106765625*p**8*q**4*r*s**2 + 10984203125*p**5*q**6*r*s**2 + 33905812500*p**2*q**8*r*s**2 - 37980900*p**12*r**2*s**2 - 2142905625*p**9*q**2*r**2*s**2 - 26896125000*p**6*q**4*r**2*s**2 - 95551328125*p**3*q**6*r**2*s**2 + 11320312500*q**8*r**2*s**2 + 1743781500*p**10*r**3*s**2 + 35432262500*p**7*q**2*r**3*s**2 + 177855859375*p**4*q**4*r**3*s**2 + 121260546875*p*q**6*r**3*s**2 - 25943162500*p**8*r**4*s**2 - 249165500000*p**5*q**2*r**4*s**2 - 461739453125*p**2*q**4*r**4*s**2 + 177823750000*p**6*r**5*s**2 + 726225000000*p**3*q**2*r**5*s**2 + 404195312500*q**4*r**5*s**2 - 565875000000*p**4*r**6*s**2 - 407500000000*p*q**2*r**6*s**2 + 682500000000*p**2*r**7*s**2 - 59140125*p**12*q*s**3 - 1290515625*p**9*q**3*s**3 - 8785071875*p**6*q**5*s**3 - 15588281250*p**3*q**7*s**3 + 17505000000*q**9*s**3 + 896062500*p**10*q*r*s**3 + 2589750000*p**7*q**3*r*s**3 - 82700156250*p**4*q**5*r*s**3 - 347683593750*p*q**7*r*s**3 + 17022656250*p**8*q*r**2*s**3 + 320923593750*p**5*q**3*r**2*s**3 + 1042116875000*p**2*q**5*r**2*s**3 - 353262812500*p**6*q*r**3*s**3 - 2212664062500*p**3*q**3*r**3*s**3 - 1252408984375*q**5*r**3*s**3 + 1967362500000*p**4*q*r**4*s**3 + 1583343750000*p*q**3*r**4*s**3 - 3560625000000*p**2*q*r**5*s**3 - 975000000000*q*r**6*s**3 + 462459375*p**11*s**4 + 14210859375*p**8*q**2*s**4 + 99521718750*p**5*q**4*s**4 + 114955468750*p**2*q**6*s**4 - 17720859375*p**9*r*s**4 - 100320703125*p**6*q**2*r*s**4 + 1021943359375*p**3*q**4*r*s**4 + 1193203125000*q**6*r*s**4 + 171371250000*p**7*r**2*s**4 - 1113390625000*p**4*q**2*r**2*s**4 - 1211474609375*p*q**4*r**2*s**4 - 274056250000*p**5*r**3*s**4 + 8285166015625*p**2*q**2*r**3*s**4 - 2079375000000*p**3*r**4*s**4 + 5137304687500*q**2*r**4*s**4 + 6187500000000*p*r**5*s**4 - 135675000000*p**7*q*s**5 - 1275244140625*p**4*q**3*s**5 - 28388671875*p*q**5*s**5 + 1015166015625*p**5*q*r*s**5 - 
10584423828125*p**2*q**3*r*s**5 + 3559570312500*p**3*q*r**2*s**5 - 6929931640625*q**3*r**2*s**5 - 32304687500000*p*q*r**3*s**5 + 430576171875*p**6*s**6 + 9397949218750*p**3*q**2*s**6 + 575195312500*q**4*s**6 - 4086425781250*p**4*r*s**6 + 42183837890625*p*q**2*r*s**6 + 8156494140625*p**2*r**2*s**6 + 12612304687500*r**3*s**6 - 25513916015625*p**2*q*s**7 - 37017822265625*q*r*s**7 + 18981933593750*p*s**8
o[2] = 1600*p**10*q**6 + 9200*p**7*q**8 - 126000*p**4*q**10 - 777600*p*q**12 - 14400*p**11*q**4*r - 119300*p**8*q**6*r + 1203225*p**5*q**8*r + 9412200*p**2*q**10*r + 32400*p**12*q**2*r**2 + 417950*p**9*q**4*r**2 - 4543725*p**6*q**6*r**2 - 49008125*p**3*q**8*r**2 - 24192000*q**10*r**2 - 292050*p**10*q**2*r**3 + 8760000*p**7*q**4*r**3 + 137506625*p**4*q**6*r**3 + 225438750*p*q**8*r**3 - 4213250*p**8*q**2*r**4 - 173595625*p**5*q**4*r**4 - 653003125*p**2*q**6*r**4 + 82575000*p**6*q**2*r**5 + 838125000*p**3*q**4*r**5 + 578562500*q**6*r**5 - 421500000*p**4*q**2*r**6 - 1796250000*p*q**4*r**6 + 1050000000*p**2*q**2*r**7 + 43200*p**12*q**3*s + 807300*p**9*q**5*s + 5328225*p**6*q**7*s + 16946250*p**3*q**9*s + 29565000*q**11*s - 194400*p**13*q*r*s - 5505300*p**10*q**3*r*s - 49886700*p**7*q**5*r*s - 178821875*p**4*q**7*r*s - 222750000*p*q**9*r*s + 6814800*p**11*q*r**2*s + 120525625*p**8*q**3*r**2*s + 526694500*p**5*q**5*r**2*s + 84065625*p**2*q**7*r**2*s - 123670500*p**9*q*r**3*s - 1106731875*p**6*q**3*r**3*s - 669556250*p**3*q**5*r**3*s - 2869265625*q**7*r**3*s + 1004350000*p**7*q*r**4*s + 3384375000*p**4*q**3*r**4*s + 5665625000*p*q**5*r**4*s - 3411000000*p**5*q*r**5*s - 418750000*p**2*q**3*r**5*s + 1700000000*p**3*q*r**6*s - 10500000000*q**3*r**6*s + 291600*p**14*s**2 + 9829350*p**11*q**2*s**2 + 114151875*p**8*q**4*s**2 + 522169375*p**5*q**6*s**2 + 716906250*p**2*q**8*s**2 - 18625950*p**12*r*s**2 - 387703125*p**9*q**2*r*s**2 - 2056109375*p**6*q**4*r*s**2 - 760203125*p**3*q**6*r*s**2 + 3071250000*q**8*r*s**2 + 512419500*p**10*r**2*s**2 + 5859053125*p**7*q**2*r**2*s**2 + 12154062500*p**4*q**4*r**2*s**2 + 15931640625*p*q**6*r**2*s**2 - 6598393750*p**8*r**3*s**2 - 43549625000*p**5*q**2*r**3*s**2 - 82011328125*p**2*q**4*r**3*s**2 + 43538125000*p**6*r**4*s**2 + 160831250000*p**3*q**2*r**4*s**2 + 99070312500*q**4*r**4*s**2 - 141812500000*p**4*r**5*s**2 - 117500000000*p*q**2*r**5*s**2 + 183750000000*p**2*r**6*s**2 - 154608750*p**10*q*s**3 - 3309468750*p**7*q**3*s**3 - 20834140625*p**4*q**5*s**3 - 34731562500*p*q**7*s**3 + 5970375000*p**8*q*r*s**3 + 68533281250*p**5*q**3*r*s**3 + 142698281250*p**2*q**5*r*s**3 - 74509140625*p**6*q*r**2*s**3 - 389148437500*p**3*q**3*r**2*s**3 - 270937890625*q**5*r**2*s**3 + 366696875000*p**4*q*r**3*s**3 + 400031250000*p*q**3*r**3*s**3 - 735156250000*p**2*q*r**4*s**3 - 262500000000*q*r**5*s**3 + 371250000*p**9*s**4 + 21315000000*p**6*q**2*s**4 + 179515625000*p**3*q**4*s**4 + 238406250000*q**6*s**4 - 9071015625*p**7*r*s**4 - 268945312500*p**4*q**2*r*s**4 - 379785156250*p*q**4*r*s**4 + 140262890625*p**5*r**2*s**4 + 1486259765625*p**2*q**2*r**2*s**4 - 806484375000*p**3*r**3*s**4 + 1066210937500*q**2*r**3*s**4 + 1722656250000*p*r**4*s**4 - 125648437500*p**5*q*s**5 - 1236279296875*p**2*q**3*s**5 + 1267871093750*p**3*q*r*s**5 - 1044677734375*q**3*r*s**5 - 6630859375000*p*q*r**2*s**5 + 160888671875*p**4*s**6 + 6352294921875*p*q**2*s**6 - 708740234375*p**2*r*s**6 + 3901367187500*r**2*s**6 - 8050537109375*q*s**7
o[1] = 2800*p**8*q**6 + 41300*p**5*q**8 + 151200*p**2*q**10 - 25200*p**9*q**4*r - 542600*p**6*q**6*r - 3397875*p**3*q**8*r - 5751000*q**10*r + 56700*p**10*q**2*r**2 + 1972125*p**7*q**4*r**2 + 18624250*p**4*q**6*r**2 + 50253750*p*q**8*r**2 - 1701000*p**8*q**2*r**3 - 32630625*p**5*q**4*r**3 - 139868750*p**2*q**6*r**3 + 18162500*p**6*q**2*r**4 + 177125000*p**3*q**4*r**4 + 121734375*q**6*r**4 - 100500000*p**4*q**2*r**5 - 386250000*p*q**4*r**5 + 225000000*p**2*q**2*r**6 + 75600*p**10*q**3*s + 1708800*p**7*q**5*s + 12836875*p**4*q**7*s + 32062500*p*q**9*s - 340200*p**11*q*r*s - 10185750*p**8*q**3*r*s - 97502750*p**5*q**5*r*s - 301640625*p**2*q**7*r*s + 7168500*p**9*q*r**2*s + 135960625*p**6*q**3*r**2*s + 587471875*p**3*q**5*r**2*s - 384750000*q**7*r**2*s - 29325000*p**7*q*r**3*s - 320625000*p**4*q**3*r**3*s + 523437500*p*q**5*r**3*s - 42000000*p**5*q*r**4*s + 343750000*p**2*q**3*r**4*s + 150000000*p**3*q*r**5*s - 2250000000*q**3*r**5*s + 510300*p**12*s**2 + 12808125*p**9*q**2*s**2 + 107062500*p**6*q**4*s**2 + 270312500*p**3*q**6*s**2 - 168750000*q**8*s**2 - 2551500*p**10*r*s**2 - 5062500*p**7*q**2*r*s**2 + 712343750*p**4*q**4*r*s**2 + 4788281250*p*q**6*r*s**2 - 256837500*p**8*r**2*s**2 - 3574812500*p**5*q**2*r**2*s**2 - 14967968750*p**2*q**4*r**2*s**2 + 4040937500*p**6*r**3*s**2 + 26400000000*p**3*q**2*r**3*s**2 + 17083984375*q**4*r**3*s**2 - 21812500000*p**4*r**4*s**2 - 24375000000*p*q**2*r**4*s**2 + 39375000000*p**2*r**5*s**2 - 127265625*p**5*q**3*s**3 - 680234375*p**2*q**5*s**3 - 2048203125*p**6*q*r*s**3 - 18794531250*p**3*q**3*r*s**3 - 25050000000*q**5*r*s**3 + 26621875000*p**4*q*r**2*s**3 + 37007812500*p*q**3*r**2*s**3 - 105468750000*p**2*q*r**3*s**3 - 56250000000*q*r**4*s**3 + 1124296875*p**7*s**4 + 9251953125*p**4*q**2*s**4 - 8007812500*p*q**4*s**4 - 4004296875*p**5*r*s**4 + 179931640625*p**2*q**2*r*s**4 - 75703125000*p**3*r**2*s**4 + 133447265625*q**2*r**2*s**4 + 363281250000*p*r**3*s**4 - 91552734375*p**3*q*s**5 - 19531250000*q**3*s**5 - 751953125000*p*q*r*s**5 + 157958984375*p**2*s**6 + 748291015625*r*s**6
o[0] = -14400*p**6*q**6 - 212400*p**3*q**8 - 777600*q**10 + 92100*p**7*q**4*r + 1689675*p**4*q**6*r + 7371000*p*q**8*r - 122850*p**8*q**2*r**2 - 3735250*p**5*q**4*r**2 - 22432500*p**2*q**6*r**2 + 2298750*p**6*q**2*r**3 + 29390625*p**3*q**4*r**3 + 18000000*q**6*r**3 - 17750000*p**4*q**2*r**4 - 62812500*p*q**4*r**4 + 37500000*p**2*q**2*r**5 - 51300*p**8*q**3*s - 768025*p**5*q**5*s - 2801250*p**2*q**7*s - 275400*p**9*q*r*s - 5479875*p**6*q**3*r*s - 35538750*p**3*q**5*r*s - 68850000*q**7*r*s + 12757500*p**7*q*r**2*s + 133640625*p**4*q**3*r**2*s + 222609375*p*q**5*r**2*s - 108500000*p**5*q*r**3*s - 290312500*p**2*q**3*r**3*s + 275000000*p**3*q*r**4*s - 375000000*q**3*r**4*s + 1931850*p**10*s**2 + 40213125*p**7*q**2*s**2 + 253921875*p**4*q**4*s**2 + 464062500*p*q**6*s**2 - 71077500*p**8*r*s**2 - 818746875*p**5*q**2*r*s**2 - 1882265625*p**2*q**4*r*s**2 + 826031250*p**6*r**2*s**2 + 4369687500*p**3*q**2*r**2*s**2 + 3107812500*q**4*r**2*s**2 - 3943750000*p**4*r**3*s**2 - 5000000000*p*q**2*r**3*s**2 + 6562500000*p**2*r**4*s**2 - 295312500*p**6*q*s**3 - 2938906250*p**3*q**3*s**3 - 4848750000*q**5*s**3 + 3791484375*p**4*q*r*s**3 + 7556250000*p*q**3*r*s**3 - 11960937500*p**2*q*r**2*s**3 - 9375000000*q*r**3*s**3 + 1668515625*p**5*s**4 + 20447265625*p**2*q**2*s**4 - 21955078125*p**3*r*s**4 + 18984375000*q**2*r*s**4 + 67382812500*p*r**2*s**4 - 120849609375*p*q*s**5 + 157226562500*s**6
return o
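    # Hedged usage sketch: when an instance carries symbolic p, q, r, s, any
    # table entry can be evaluated for concrete values with ordinary SymPy
    # substitution (the instance name `quin` below is hypothetical):
    #
    #     vals = {quin.p: 1, quin.q: -2, quin.r: 0, quin.s: 3}
    #     o5 = quin.o[5].subs(vals)   # numeric value of the entry at index 5
    #
    # If p, q, r and s are already numeric, accessing `quin.o` yields fully
    # evaluated values directly, since the list is rebuilt from those
    # attributes on every access.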
@property
def a(self):
p, q, r, s = self.p, self.q, self.r, self.s
a = [0]*6
a[5] = -100*p**7*q**7 - 2175*p**4*q**9 - 10500*p*q**11 + 1100*p**8*q**5*r + 27975*p**5*q**7*r + 152950*p**2*q**9*r - 4125*p**9*q**3*r**2 - 128875*p**6*q**5*r**2 - 830525*p**3*q**7*r**2 + 59450*q**9*r**2 + 5400*p**10*q*r**3 + 243800*p**7*q**3*r**3 + 2082650*p**4*q**5*r**3 - 333925*p*q**7*r**3 - 139200*p**8*q*r**4 - 2406000*p**5*q**3*r**4 - 122600*p**2*q**5*r**4 + 1254400*p**6*q*r**5 + 3776000*p**3*q**3*r**5 + 1832000*q**5*r**5 - 4736000*p**4*q*r**6 - 6720000*p*q**3*r**6 + 6400000*p**2*q*r**7 - 900*p**9*q**4*s - 37400*p**6*q**6*s - 281625*p**3*q**8*s - 435000*q**10*s + 6750*p**10*q**2*r*s + 322300*p**7*q**4*r*s + 2718575*p**4*q**6*r*s + 4214250*p*q**8*r*s - 16200*p**11*r**2*s - 859275*p**8*q**2*r**2*s - 8925475*p**5*q**4*r**2*s - 14427875*p**2*q**6*r**2*s + 453600*p**9*r**3*s + 10038400*p**6*q**2*r**3*s + 17397500*p**3*q**4*r**3*s - 11333125*q**6*r**3*s - 4451200*p**7*r**4*s - 15850000*p**4*q**2*r**4*s + 34000000*p*q**4*r**4*s + 17984000*p**5*r**5*s - 10000000*p**2*q**2*r**5*s - 25600000*p**3*r**6*s - 8000000*q**2*r**6*s + 6075*p**11*q*s**2 - 83250*p**8*q**3*s**2 - 1282500*p**5*q**5*s**2 - 2862500*p**2*q**7*s**2 + 724275*p**9*q*r*s**2 + 9807250*p**6*q**3*r*s**2 + 28374375*p**3*q**5*r*s**2 + 22212500*q**7*r*s**2 - 8982000*p**7*q*r**2*s**2 - 39600000*p**4*q**3*r**2*s**2 - 61746875*p*q**5*r**2*s**2 - 1010000*p**5*q*r**3*s**2 - 1000000*p**2*q**3*r**3*s**2 + 78000000*p**3*q*r**4*s**2 + 30000000*q**3*r**4*s**2 + 80000000*p*q*r**5*s**2 - 759375*p**10*s**3 - 9787500*p**7*q**2*s**3 - 39062500*p**4*q**4*s**3 - 52343750*p*q**6*s**3 + 12301875*p**8*r*s**3 + 98175000*p**5*q**2*r*s**3 + 225078125*p**2*q**4*r*s**3 - 54900000*p**6*r**2*s**3 - 310000000*p**3*q**2*r**2*s**3 - 7890625*q**4*r**2*s**3 + 51250000*p**4*r**3*s**3 - 420000000*p*q**2*r**3*s**3 + 110000000*p**2*r**4*s**3 - 200000000*r**5*s**3 + 2109375*p**6*q*s**4 - 21093750*p**3*q**3*s**4 - 89843750*q**5*s**4 + 182343750*p**4*q*r*s**4 + 733203125*p*q**3*r*s**4 - 196875000*p**2*q*r**2*s**4 + 1125000000*q*r**3*s**4 - 158203125*p**5*s**5 - 566406250*p**2*q**2*s**5 + 101562500*p**3*r*s**5 - 1669921875*q**2*r*s**5 + 1250000000*p*r**2*s**5 - 1220703125*p*q*s**6 + 6103515625*s**7
a[4] = 1000*p**5*q**7 + 7250*p**2*q**9 - 10800*p**6*q**5*r - 96900*p**3*q**7*r - 52500*q**9*r + 37400*p**7*q**3*r**2 + 470850*p**4*q**5*r**2 + 640600*p*q**7*r**2 - 39600*p**8*q*r**3 - 983600*p**5*q**3*r**3 - 2848100*p**2*q**5*r**3 + 814400*p**6*q*r**4 + 6076000*p**3*q**3*r**4 + 2308000*q**5*r**4 - 5024000*p**4*q*r**5 - 9680000*p*q**3*r**5 + 9600000*p**2*q*r**6 + 13800*p**7*q**4*s + 94650*p**4*q**6*s - 26500*p*q**8*s - 86400*p**8*q**2*r*s - 816500*p**5*q**4*r*s - 257500*p**2*q**6*r*s + 91800*p**9*r**2*s + 1853700*p**6*q**2*r**2*s + 630000*p**3*q**4*r**2*s - 8971250*q**6*r**2*s - 2071200*p**7*r**3*s - 7240000*p**4*q**2*r**3*s + 29375000*p*q**4*r**3*s + 14416000*p**5*r**4*s - 5200000*p**2*q**2*r**4*s - 30400000*p**3*r**5*s - 12000000*q**2*r**5*s + 64800*p**9*q*s**2 + 567000*p**6*q**3*s**2 + 1655000*p**3*q**5*s**2 + 6987500*q**7*s**2 + 337500*p**7*q*r*s**2 + 8462500*p**4*q**3*r*s**2 - 5812500*p*q**5*r*s**2 - 24930000*p**5*q*r**2*s**2 - 69125000*p**2*q**3*r**2*s**2 + 103500000*p**3*q*r**3*s**2 + 30000000*q**3*r**3*s**2 + 90000000*p*q*r**4*s**2 - 708750*p**8*s**3 - 5400000*p**5*q**2*s**3 + 8906250*p**2*q**4*s**3 + 18562500*p**6*r*s**3 - 625000*p**3*q**2*r*s**3 + 29687500*q**4*r*s**3 - 75000000*p**4*r**2*s**3 - 416250000*p*q**2*r**2*s**3 + 60000000*p**2*r**3*s**3 - 300000000*r**4*s**3 + 71718750*p**4*q*s**4 + 189062500*p*q**3*s**4 + 210937500*p**2*q*r*s**4 + 1187500000*q*r**2*s**4 - 187500000*p**3*s**5 - 800781250*q**2*s**5 - 390625000*p*r*s**5
a[3] = -500*p**6*q**5 - 6350*p**3*q**7 - 19800*q**9 + 3750*p**7*q**3*r + 65100*p**4*q**5*r + 264950*p*q**7*r - 6750*p**8*q*r**2 - 209050*p**5*q**3*r**2 - 1217250*p**2*q**5*r**2 + 219000*p**6*q*r**3 + 2510000*p**3*q**3*r**3 + 1098500*q**5*r**3 - 2068000*p**4*q*r**4 - 5060000*p*q**3*r**4 + 5200000*p**2*q*r**5 - 6750*p**8*q**2*s - 96350*p**5*q**4*s - 346000*p**2*q**6*s + 20250*p**9*r*s + 459900*p**6*q**2*r*s + 1828750*p**3*q**4*r*s - 2930000*q**6*r*s - 594000*p**7*r**2*s - 4301250*p**4*q**2*r**2*s + 10906250*p*q**4*r**2*s + 5252000*p**5*r**3*s - 1450000*p**2*q**2*r**3*s - 12800000*p**3*r**4*s - 6500000*q**2*r**4*s + 74250*p**7*q*s**2 + 1418750*p**4*q**3*s**2 + 5956250*p*q**5*s**2 - 4297500*p**5*q*r*s**2 - 29906250*p**2*q**3*r*s**2 + 31500000*p**3*q*r**2*s**2 + 12500000*q**3*r**2*s**2 + 35000000*p*q*r**3*s**2 + 1350000*p**6*s**3 + 6093750*p**3*q**2*s**3 + 17500000*q**4*s**3 - 7031250*p**4*r*s**3 - 127812500*p*q**2*r*s**3 + 18750000*p**2*r**2*s**3 - 162500000*r**3*s**3 + 107812500*p**2*q*s**4 + 460937500*q*r*s**4 - 214843750*p*s**5
a[2] = 1950*p**4*q**5 + 14100*p*q**7 - 14350*p**5*q**3*r - 125600*p**2*q**5*r + 27900*p**6*q*r**2 + 402250*p**3*q**3*r**2 + 288250*q**5*r**2 - 436000*p**4*q*r**3 - 1345000*p*q**3*r**3 + 1400000*p**2*q*r**4 + 9450*p**6*q**2*s - 1250*p**3*q**4*s - 465000*q**6*s - 49950*p**7*r*s - 302500*p**4*q**2*r*s + 1718750*p*q**4*r*s + 834000*p**5*r**2*s + 437500*p**2*q**2*r**2*s - 3100000*p**3*r**3*s - 1750000*q**2*r**3*s - 292500*p**5*q*s**2 - 1937500*p**2*q**3*s**2 + 3343750*p**3*q*r*s**2 + 1875000*q**3*r*s**2 + 8125000*p*q*r**2*s**2 - 1406250*p**4*s**3 - 12343750*p*q**2*s**3 + 5312500*p**2*r*s**3 - 43750000*r**2*s**3 + 74218750*q*s**4
a[1] = -300*p**5*q**3 - 2150*p**2*q**5 + 1350*p**6*q*r + 21500*p**3*q**3*r + 61500*q**5*r - 42000*p**4*q*r**2 - 290000*p*q**3*r**2 + 300000*p**2*q*r**3 - 4050*p**7*s - 45000*p**4*q**2*s - 125000*p*q**4*s + 108000*p**5*r*s + 643750*p**2*q**2*r*s - 700000*p**3*r**2*s - 375000*q**2*r**2*s - 93750*p**3*q*s**2 - 312500*q**3*s**2 + 1875000*p*q*r*s**2 - 1406250*p**2*s**3 - 9375000*r*s**3
a[0] = 1250*p**3*q**3 + 9000*q**5 - 4500*p**4*q*r - 46250*p*q**3*r + 50000*p**2*q*r**2 + 6750*p**5*s + 43750*p**2*q**2*s - 75000*p**3*r*s - 62500*q**2*r*s + 156250*p*q*s**2 - 1562500*s**3
return a
@property
def c(self):
p, q, r, s = self.p, self.q, self.r, self.s
c = [0]*6
        c[5] = -40*p**5*q**11 - 270*p**2*q**13 + 700*p**6*q**9*r + 5165*p**3*q**11*r + 540*q**13*r - 4230*p**7*q**7*r**2 - 31845*p**4*q**9*r**2 + 20880*p*q**11*r**2 + 9645*p**8*q**5*r**3 + 57615*p**5*q**7*r**3 - 358255*p**2*q**9*r**3 - 1880*p**9*q**3*r**4 + 114020*p**6*q**5*r**4 + 2012190*p**3*q**7*r**4 - 26855*q**9*r**4 - 14400*p**10*q*r**5 - 470400*p**7*q**3*r**5 - 5088640*p**4*q**5*r**5 + 920*p*q**7*r**5 + 332800*p**8*q*r**6 + 5797120*p**5*q**3*r**6 + 1608000*p**2*q**5*r**6 - 2611200*p**6*q*r**7 - 7424000*p**3*q**3*r**7 - 2323200*q**5*r**7 + 8601600*p**4*q*r**8 + 9472000*p*q**3*r**8 - 10240000*p**2*q*r**9 - 3060*p**7*q**8*s - 39085*p**4*q**10*s - 132300*p*q**12*s + 36580*p**8*q**6*r*s + 520185*p**5*q**8*r*s + 1969860*p**2*q**10*r*s - 144045*p**9*q**4*r**2*s - 2438425*p**6*q**6*r**2*s - 10809475*p**3*q**8*r**2*s + 518850*q**10*r**2*s + 182520*p**10*q**2*r**3*s + 4533930*p**7*q**4*r**3*s + 26196770*p**4*q**6*r**3*s - 4542325*p*q**8*r**3*s + 21600*p**11*r**4*s - 2208080*p**8*q**2*r**4*s - 24787960*p**5*q**4*r**4*s + 10813900*p**2*q**6*r**4*s - 499200*p**9*r**5*s + 3827840*p**6*q**2*r**5*s + 9596000*p**3*q**4*r**5*s + 22662000*q**6*r**5*s + 3916800*p**7*r**6*s - 29952000*p**4*q**2*r**6*s - 90800000*p*q**4*r**6*s - 12902400*p**5*r**7*s + 87040000*p**2*q**2*r**7*s + 15360000*p**3*r**8*s + 12800000*q**2*r**8*s - 38070*p**9*q**5*s**2 - 566700*p**6*q**7*s**2 - 2574375*p**3*q**9*s**2 - 1822500*q**11*s**2 + 292815*p**10*q**3*r*s**2 + 5170280*p**7*q**5*r*s**2 + 27918125*p**4*q**7*r*s**2 + 21997500*p*q**9*r*s**2 - 573480*p**11*q*r**2*s**2 - 14566350*p**8*q**3*r**2*s**2 - 104851575*p**5*q**5*r**2*s**2 - 96448750*p**2*q**7*r**2*s**2 + 11001240*p**9*q*r**3*s**2 + 147798600*p**6*q**3*r**3*s**2 + 158632750*p**3*q**5*r**3*s**2 - 78222500*q**7*r**3*s**2 - 62819200*p**7*q*r**4*s**2 - 136160000*p**4*q**3*r**4*s**2 + 317555000*p*q**5*r**4*s**2 + 160224000*p**5*q*r**5*s**2 - 267600000*p**2*q**3*r**5*s**2 - 153600000*p**3*q*r**6*s**2 - 120000000*q**3*r**6*s**2 - 32000000*p*q*r**7*s**2 - 127575*p**11*q**2*s**3 - 2148750*p**8*q**4*s**3 - 13652500*p**5*q**6*s**3 - 19531250*p**2*q**8*s**3 + 495720*p**12*r*s**3 + 11856375*p**9*q**2*r*s**3 + 107807500*p**6*q**4*r*s**3 + 222334375*p**3*q**6*r*s**3 + 105062500*q**8*r*s**3 - 11566800*p**10*r**2*s**3 - 216787500*p**7*q**2*r**2*s**3 - 633437500*p**4*q**4*r**2*s**3 - 504484375*p*q**6*r**2*s**3 + 90918000*p**8*r**3*s**3 + 567080000*p**5*q**2*r**3*s**3 + 692937500*p**2*q**4*r**3*s**3 - 326640000*p**6*r**4*s**3 - 339000000*p**3*q**2*r**4*s**3 + 369250000*q**4*r**4*s**3 + 560000000*p**4*r**5*s**3 + 508000000*p*q**2*r**5*s**3 - 480000000*p**2*r**6*s**3 + 320000000*r**7*s**3 - 455625*p**10*q*s**4 - 27562500*p**7*q**3*s**4 - 120593750*p**4*q**5*s**4 - 60312500*p*q**7*s**4 + 110615625*p**8*q*r*s**4 + 662984375*p**5*q**3*r*s**4 + 528515625*p**2*q**5*r*s**4 - 541687500*p**6*q*r**2*s**4 - 1262343750*p**3*q**3*r**2*s**4 - 466406250*q**5*r**2*s**4 + 633000000*p**4*q*r**3*s**4 - 1264375000*p*q**3*r**3*s**4 + 1085000000*p**2*q*r**4*s**4 - 2700000000*q*r**5*s**4 - 68343750*p**9*s**5 - 478828125*p**6*q**2*s**5 - 355468750*p**3*q**4*s**5 - 11718750*q**6*s**5 + 718031250*p**7*r*s**5 + 1658593750*p**4*q**2*r*s**5 + 2212890625*p*q**4*r*s**5 - 2855625000*p**5*r**2*s**5 - 4273437500*p**2*q**2*r**2*s**5 + 4537500000*p**3*r**3*s**5 + 8031250000*q**2*r**3*s**5 - 1750000000*p*r**4*s**5 + 1353515625*p**5*q*s**6 + 1562500000*p**2*q**3*s**6 - 3964843750*p**3*q*r*s**6 - 7226562500*q**3*r*s**6 + 1953125000*p*q*r**2*s**6 - 1757812500*p**4*s**7 - 3173828125*p*q**2*s**7 + 6445312500*p**2*r*s**7 - 3906250000*r**2*s**7 + 6103515625*q*s**8
c[4] = 40*p**6*q**9 + 110*p**3*q**11 - 1080*q**13 - 560*p**7*q**7*r - 1780*p**4*q**9*r + 17370*p*q**11*r + 2850*p**8*q**5*r**2 + 10520*p**5*q**7*r**2 - 115910*p**2*q**9*r**2 - 6090*p**9*q**3*r**3 - 25330*p**6*q**5*r**3 + 448740*p**3*q**7*r**3 + 128230*q**9*r**3 + 4320*p**10*q*r**4 + 16960*p**7*q**3*r**4 - 1143600*p**4*q**5*r**4 - 1410310*p*q**7*r**4 + 3840*p**8*q*r**5 + 1744480*p**5*q**3*r**5 + 5619520*p**2*q**5*r**5 - 1198080*p**6*q*r**6 - 10579200*p**3*q**3*r**6 - 2940800*q**5*r**6 + 8294400*p**4*q*r**7 + 13568000*p*q**3*r**7 - 15360000*p**2*q*r**8 + 840*p**8*q**6*s + 7580*p**5*q**8*s + 24420*p**2*q**10*s - 8100*p**9*q**4*r*s - 94100*p**6*q**6*r*s - 473000*p**3*q**8*r*s - 473400*q**10*r*s + 22680*p**10*q**2*r**2*s + 374370*p**7*q**4*r**2*s + 2888020*p**4*q**6*r**2*s + 5561050*p*q**8*r**2*s - 12960*p**11*r**3*s - 485820*p**8*q**2*r**3*s - 6723440*p**5*q**4*r**3*s - 23561400*p**2*q**6*r**3*s + 190080*p**9*r**4*s + 5894880*p**6*q**2*r**4*s + 50882000*p**3*q**4*r**4*s + 22411500*q**6*r**4*s - 258560*p**7*r**5*s - 46248000*p**4*q**2*r**5*s - 103800000*p*q**4*r**5*s - 3737600*p**5*r**6*s + 119680000*p**2*q**2*r**6*s + 10240000*p**3*r**7*s + 19200000*q**2*r**7*s + 7290*p**10*q**3*s**2 + 117360*p**7*q**5*s**2 + 691250*p**4*q**7*s**2 - 198750*p*q**9*s**2 - 36450*p**11*q*r*s**2 - 854550*p**8*q**3*r*s**2 - 7340700*p**5*q**5*r*s**2 - 2028750*p**2*q**7*r*s**2 + 995490*p**9*q*r**2*s**2 + 18896600*p**6*q**3*r**2*s**2 + 5026500*p**3*q**5*r**2*s**2 - 52272500*q**7*r**2*s**2 - 16636800*p**7*q*r**3*s**2 - 43200000*p**4*q**3*r**3*s**2 + 223426250*p*q**5*r**3*s**2 + 112068000*p**5*q*r**4*s**2 - 177000000*p**2*q**3*r**4*s**2 - 244000000*p**3*q*r**5*s**2 - 156000000*q**3*r**5*s**2 + 43740*p**12*s**3 + 1032750*p**9*q**2*s**3 + 8602500*p**6*q**4*s**3 + 15606250*p**3*q**6*s**3 + 39625000*q**8*s**3 - 1603800*p**10*r*s**3 - 26932500*p**7*q**2*r*s**3 - 19562500*p**4*q**4*r*s**3 - 152000000*p*q**6*r*s**3 + 25555500*p**8*r**2*s**3 + 16230000*p**5*q**2*r**2*s**3 + 42187500*p**2*q**4*r**2*s**3 - 165660000*p**6*r**3*s**3 + 373500000*p**3*q**2*r**3*s**3 + 332937500*q**4*r**3*s**3 + 465000000*p**4*r**4*s**3 + 586000000*p*q**2*r**4*s**3 - 592000000*p**2*r**5*s**3 + 480000000*r**6*s**3 - 1518750*p**8*q*s**4 - 62531250*p**5*q**3*s**4 + 7656250*p**2*q**5*s**4 + 184781250*p**6*q*r*s**4 - 15781250*p**3*q**3*r*s**4 - 135156250*q**5*r*s**4 - 1148250000*p**4*q*r**2*s**4 - 2121406250*p*q**3*r**2*s**4 + 1990000000*p**2*q*r**3*s**4 - 3150000000*q*r**4*s**4 - 2531250*p**7*s**5 + 660937500*p**4*q**2*s**5 + 1339843750*p*q**4*s**5 - 33750000*p**5*r*s**5 - 679687500*p**2*q**2*r*s**5 + 6250000*p**3*r**2*s**5 + 6195312500*q**2*r**2*s**5 + 1125000000*p*r**3*s**5 - 996093750*p**3*q*s**6 - 3125000000*q**3*s**6 - 3222656250*p*q*r*s**6 + 1171875000*p**2*s**7 + 976562500*r*s**7
c[3] = 80*p**4*q**9 + 540*p*q**11 - 600*p**5*q**7*r - 4770*p**2*q**9*r + 1230*p**6*q**5*r**2 + 20900*p**3*q**7*r**2 + 47250*q**9*r**2 - 710*p**7*q**3*r**3 - 84950*p**4*q**5*r**3 - 526310*p*q**7*r**3 + 720*p**8*q*r**4 + 216280*p**5*q**3*r**4 + 2068020*p**2*q**5*r**4 - 198080*p**6*q*r**5 - 3703200*p**3*q**3*r**5 - 1423600*q**5*r**5 + 2860800*p**4*q*r**6 + 7056000*p*q**3*r**6 - 8320000*p**2*q*r**7 - 2720*p**6*q**6*s - 46350*p**3*q**8*s - 178200*q**10*s + 25740*p**7*q**4*r*s + 489490*p**4*q**6*r*s + 2152350*p*q**8*r*s - 61560*p**8*q**2*r**2*s - 1568150*p**5*q**4*r**2*s - 9060500*p**2*q**6*r**2*s + 24840*p**9*r**3*s + 1692380*p**6*q**2*r**3*s + 18098250*p**3*q**4*r**3*s + 9387750*q**6*r**3*s - 382560*p**7*r**4*s - 16818000*p**4*q**2*r**4*s - 49325000*p*q**4*r**4*s + 1212800*p**5*r**5*s + 64840000*p**2*q**2*r**5*s - 320000*p**3*r**6*s + 10400000*q**2*r**6*s - 36450*p**8*q**3*s**2 - 588350*p**5*q**5*s**2 - 2156250*p**2*q**7*s**2 + 123930*p**9*q*r*s**2 + 2879700*p**6*q**3*r*s**2 + 12548000*p**3*q**5*r*s**2 - 14445000*q**7*r*s**2 - 3233250*p**7*q*r**2*s**2 - 28485000*p**4*q**3*r**2*s**2 + 72231250*p*q**5*r**2*s**2 + 32093000*p**5*q*r**3*s**2 - 61275000*p**2*q**3*r**3*s**2 - 107500000*p**3*q*r**4*s**2 - 78500000*q**3*r**4*s**2 + 22000000*p*q*r**5*s**2 - 72900*p**10*s**3 - 1215000*p**7*q**2*s**3 - 2937500*p**4*q**4*s**3 + 9156250*p*q**6*s**3 + 2612250*p**8*r*s**3 + 16560000*p**5*q**2*r*s**3 - 75468750*p**2*q**4*r*s**3 - 32737500*p**6*r**2*s**3 + 169062500*p**3*q**2*r**2*s**3 + 121718750*q**4*r**2*s**3 + 160250000*p**4*r**3*s**3 + 219750000*p*q**2*r**3*s**3 - 317000000*p**2*r**4*s**3 + 260000000*r**5*s**3 + 2531250*p**6*q*s**4 + 22500000*p**3*q**3*s**4 + 39843750*q**5*s**4 - 266343750*p**4*q*r*s**4 - 776406250*p*q**3*r*s**4 + 789062500*p**2*q*r**2*s**4 - 1368750000*q*r**3*s**4 + 67500000*p**5*s**5 + 441406250*p**2*q**2*s**5 - 311718750*p**3*r*s**5 + 1785156250*q**2*r*s**5 + 546875000*p*r**2*s**5 - 1269531250*p*q*s**6 + 488281250*s**7
c[2] = 120*p**5*q**7 + 810*p**2*q**9 - 1280*p**6*q**5*r - 9160*p**3*q**7*r + 3780*q**9*r + 4530*p**7*q**3*r**2 + 36640*p**4*q**5*r**2 - 45270*p*q**7*r**2 - 5400*p**8*q*r**3 - 60920*p**5*q**3*r**3 + 200050*p**2*q**5*r**3 + 31200*p**6*q*r**4 - 476000*p**3*q**3*r**4 - 378200*q**5*r**4 + 521600*p**4*q*r**5 + 1872000*p*q**3*r**5 - 2240000*p**2*q*r**6 + 1440*p**7*q**4*s + 15310*p**4*q**6*s + 59400*p*q**8*s - 9180*p**8*q**2*r*s - 115240*p**5*q**4*r*s - 589650*p**2*q**6*r*s + 16200*p**9*r**2*s + 316710*p**6*q**2*r**2*s + 2547750*p**3*q**4*r**2*s + 2178000*q**6*r**2*s - 259200*p**7*r**3*s - 4123000*p**4*q**2*r**3*s - 11700000*p*q**4*r**3*s + 937600*p**5*r**4*s + 16340000*p**2*q**2*r**4*s - 640000*p**3*r**5*s + 2800000*q**2*r**5*s - 2430*p**9*q*s**2 - 54450*p**6*q**3*s**2 - 285500*p**3*q**5*s**2 - 2767500*q**7*s**2 + 43200*p**7*q*r*s**2 - 916250*p**4*q**3*r*s**2 + 14482500*p*q**5*r*s**2 + 4806000*p**5*q*r**2*s**2 - 13212500*p**2*q**3*r**2*s**2 - 25400000*p**3*q*r**3*s**2 - 18750000*q**3*r**3*s**2 + 8000000*p*q*r**4*s**2 + 121500*p**8*s**3 + 2058750*p**5*q**2*s**3 - 6656250*p**2*q**4*s**3 - 6716250*p**6*r*s**3 + 24125000*p**3*q**2*r*s**3 + 23875000*q**4*r*s**3 + 43125000*p**4*r**2*s**3 + 45750000*p*q**2*r**2*s**3 - 87500000*p**2*r**3*s**3 + 70000000*r**4*s**3 - 44437500*p**4*q*s**4 - 107968750*p*q**3*s**4 + 159531250*p**2*q*r*s**4 - 284375000*q*r**2*s**4 + 7031250*p**3*s**5 + 265625000*q**2*s**5 + 31250000*p*r*s**5
c[1] = 160*p**3*q**7 + 1080*q**9 - 1080*p**4*q**5*r - 8730*p*q**7*r + 1510*p**5*q**3*r**2 + 20420*p**2*q**5*r**2 + 720*p**6*q*r**3 - 23200*p**3*q**3*r**3 - 79900*q**5*r**3 + 35200*p**4*q*r**4 + 404000*p*q**3*r**4 - 480000*p**2*q*r**5 + 960*p**5*q**4*s + 2850*p**2*q**6*s + 540*p**6*q**2*r*s + 63500*p**3*q**4*r*s + 319500*q**6*r*s - 7560*p**7*r**2*s - 253500*p**4*q**2*r**2*s - 1806250*p*q**4*r**2*s + 91200*p**5*r**3*s + 2600000*p**2*q**2*r**3*s - 80000*p**3*r**4*s + 600000*q**2*r**4*s - 4050*p**7*q*s**2 - 120000*p**4*q**3*s**2 - 273750*p*q**5*s**2 + 425250*p**5*q*r*s**2 + 2325000*p**2*q**3*r*s**2 - 5400000*p**3*q*r**2*s**2 - 2875000*q**3*r**2*s**2 + 1500000*p*q*r**3*s**2 - 303750*p**6*s**3 - 843750*p**3*q**2*s**3 - 812500*q**4*s**3 + 5062500*p**4*r*s**3 + 13312500*p*q**2*r*s**3 - 14500000*p**2*r**2*s**3 + 15000000*r**3*s**3 - 3750000*p**2*q*s**4 - 35937500*q*r*s**4 + 11718750*p*s**5
c[0] = 80*p**4*q**5 + 540*p*q**7 - 600*p**5*q**3*r - 4770*p**2*q**5*r + 1080*p**6*q*r**2 + 11200*p**3*q**3*r**2 - 12150*q**5*r**2 - 4800*p**4*q*r**3 + 64000*p*q**3*r**3 - 80000*p**2*q*r**4 + 1080*p**6*q**2*s + 13250*p**3*q**4*s + 54000*q**6*s - 3240*p**7*r*s - 56250*p**4*q**2*r*s - 337500*p*q**4*r*s + 43200*p**5*r**2*s + 560000*p**2*q**2*r**2*s - 80000*p**3*r**3*s + 100000*q**2*r**3*s + 6750*p**5*q*s**2 + 225000*p**2*q**3*s**2 - 900000*p**3*q*r*s**2 - 562500*q**3*r*s**2 + 500000*p*q*r**2*s**2 + 843750*p**4*s**3 + 1937500*p*q**2*s**3 - 3000000*p**2*r*s**3 + 2500000*r**2*s**3 - 5468750*q*s**4
return c
@property
def F(self):
p, q, r, s = self.p, self.q, self.r, self.s
F = 4*p**6*q**6 + 59*p**3*q**8 + 216*q**10 - 36*p**7*q**4*r - 623*p**4*q**6*r - 2610*p*q**8*r + 81*p**8*q**2*r**2 + 2015*p**5*q**4*r**2 + 10825*p**2*q**6*r**2 - 1800*p**6*q**2*r**3 - 17500*p**3*q**4*r**3 + 625*q**6*r**3 + 10000*p**4*q**2*r**4 + 108*p**8*q**3*s + 1584*p**5*q**5*s + 5700*p**2*q**7*s - 486*p**9*q*r*s - 9720*p**6*q**3*r*s - 45050*p**3*q**5*r*s - 9000*q**7*r*s + 10800*p**7*q*r**2*s + 92500*p**4*q**3*r**2*s + 32500*p*q**5*r**2*s - 60000*p**5*q*r**3*s - 50000*p**2*q**3*r**3*s + 729*p**10*s**2 + 12150*p**7*q**2*s**2 + 60000*p**4*q**4*s**2 + 93750*p*q**6*s**2 - 18225*p**8*r*s**2 - 175500*p**5*q**2*r*s**2 - 478125*p**2*q**4*r*s**2 + 135000*p**6*r**2*s**2 + 850000*p**3*q**2*r**2*s**2 + 15625*q**4*r**2*s**2 - 250000*p**4*r**3*s**2 + 225000*p**3*q**3*s**3 + 175000*q**5*s**3 - 1012500*p**4*q*r*s**3 - 1187500*p*q**3*r*s**3 + 1250000*p**2*q*r**2*s**3 + 928125*p**5*s**4 + 1875000*p**2*q**2*s**4 - 2812500*p**3*r*s**4 - 390625*q**2*r*s**4 - 9765625*s**6
return F
def l0(self, theta):
p, q, r, s, F = self.p, self.q, self.r, self.s, self.F
a = self.a
l0 = Poly(a, x).eval(theta)/F
return l0
def T(self, theta, d):
p, q, r, s, F = self.p, self.q, self.r, self.s, self.F
T = [0]*5
b = self.b
# Note that the order of sublists of the b's has been reversed compared to the paper
T[1] = -Poly(b[1], x).eval(theta)/(2*F)
T[2] = Poly(b[2], x).eval(theta)/(2*d*F)
T[3] = Poly(b[3], x).eval(theta)/(2*F)
T[4] = Poly(b[4], x).eval(theta)/(2*d*F)
return T
def order(self, theta, d):
p, q, r, s, F = self.p, self.q, self.r, self.s, self.F
o = self.o
order = Poly(o, x).eval(theta)/(d*F)
return N(order)
def uv(self, theta, d):
c = self.c
u = S(-25*self.q/2)
v = Poly(c, x).eval(theta)/(2*d*self.F)
return N(u), N(v)
@property
def zeta(self):
return [self.zeta1, self.zeta2, self.zeta3, self.zeta4]
| bsd-3-clause |
kerr-huang/SL4A | python/src/Lib/ctypes/__init__.py | 58 | 17311 | ######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
"""create and manipulate C data types in Python"""
import os as _os, sys as _sys
__version__ = "1.1.0"
from _ctypes import Union, Structure, Array
from _ctypes import _Pointer
from _ctypes import CFuncPtr as _CFuncPtr
from _ctypes import __version__ as _ctypes_version
from _ctypes import RTLD_LOCAL, RTLD_GLOBAL
from _ctypes import ArgumentError
from struct import calcsize as _calcsize
if __version__ != _ctypes_version:
raise Exception("Version number mismatch", __version__, _ctypes_version)
if _os.name in ("nt", "ce"):
from _ctypes import FormatError
DEFAULT_MODE = RTLD_LOCAL
if _os.name == "posix" and _sys.platform == "darwin":
# On OS X 10.3, we use RTLD_GLOBAL as default mode
# because RTLD_LOCAL does not work at least on some
# libraries. OS X 10.3 is Darwin 7, so we check for
# that.
if int(_os.uname()[2].split('.')[0]) < 8:
DEFAULT_MODE = RTLD_GLOBAL
from _ctypes import FUNCFLAG_CDECL as _FUNCFLAG_CDECL, \
FUNCFLAG_PYTHONAPI as _FUNCFLAG_PYTHONAPI, \
FUNCFLAG_USE_ERRNO as _FUNCFLAG_USE_ERRNO, \
FUNCFLAG_USE_LASTERROR as _FUNCFLAG_USE_LASTERROR
"""
WINOLEAPI -> HRESULT
WINOLEAPI_(type)
STDMETHODCALLTYPE
STDMETHOD(name)
STDMETHOD_(type, name)
STDAPICALLTYPE
"""
def create_string_buffer(init, size=None):
"""create_string_buffer(aString) -> character array
create_string_buffer(anInteger) -> character array
create_string_buffer(aString, anInteger) -> character array
"""
if isinstance(init, (str, unicode)):
if size is None:
size = len(init)+1
buftype = c_char * size
buf = buftype()
buf.value = init
return buf
elif isinstance(init, (int, long)):
buftype = c_char * init
buf = buftype()
return buf
raise TypeError(init)
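# Illustrative sketch (not part of the original module): the three calling
# conventions described in the create_string_buffer docstring above. The
# buffer contents are assumptions made up for the example.
def _example_create_string_buffer():
    buf = create_string_buffer(16)             # 16 NUL bytes, mutable
    buf.value = "hello"                        # stored NUL-terminated
    sized = create_string_buffer("abc")        # size defaults to len("abc") + 1 == 4
    explicit = create_string_buffer("abc", 8)  # 8-byte buffer holding "abc"
    return buf, sized, explicit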
def c_buffer(init, size=None):
## "deprecated, use create_string_buffer instead"
## import warnings
## warnings.warn("c_buffer is deprecated, use create_string_buffer instead",
## DeprecationWarning, stacklevel=2)
return create_string_buffer(init, size)
_c_functype_cache = {}
def CFUNCTYPE(restype, *argtypes, **kw):
"""CFUNCTYPE(restype, *argtypes,
use_errno=False, use_last_error=False) -> function prototype.
restype: the result type
argtypes: a sequence specifying the argument types
The function prototype can be called in different ways to create a
callable object:
prototype(integer address) -> foreign function
prototype(callable) -> create and return a C callable function from callable
prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method
prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal
prototype((function name, dll object)[, paramflags]) -> foreign function exported by name
"""
flags = _FUNCFLAG_CDECL
if kw.pop("use_errno", False):
flags |= _FUNCFLAG_USE_ERRNO
if kw.pop("use_last_error", False):
flags |= _FUNCFLAG_USE_LASTERROR
if kw:
raise ValueError("unexpected keyword argument(s) %s" % kw.keys())
try:
return _c_functype_cache[(restype, argtypes, flags)]
except KeyError:
class CFunctionType(_CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_flags_ = flags
_c_functype_cache[(restype, argtypes, flags)] = CFunctionType
return CFunctionType
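# Illustrative sketch (not part of the original module): wrapping a Python
# callable with a CFUNCTYPE prototype and passing it to a C function, as the
# docstring above describes. "libc.so.6" and qsort() are assumptions and only
# exist on platforms that provide them; c_int, POINTER, sizeof and CDLL are
# defined further down in this module and resolve when the sketch is called.
def _example_cfunctype_usage():
    CMPFUNC = CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
    def py_cmp(a, b):
        return a[0] - b[0]
    libc = CDLL("libc.so.6")
    values = (c_int * 4)(3, 1, 2, 0)
    libc.qsort(values, len(values), sizeof(c_int), CMPFUNC(py_cmp))
    return list(values)                        # sorted in place by qsort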
if _os.name in ("nt", "ce"):
from _ctypes import LoadLibrary as _dlopen
from _ctypes import FUNCFLAG_STDCALL as _FUNCFLAG_STDCALL
if _os.name == "ce":
# 'ce' doesn't have the stdcall calling convention
_FUNCFLAG_STDCALL = _FUNCFLAG_CDECL
_win_functype_cache = {}
def WINFUNCTYPE(restype, *argtypes, **kw):
# docstring set later (very similar to CFUNCTYPE.__doc__)
flags = _FUNCFLAG_STDCALL
if kw.pop("use_errno", False):
flags |= _FUNCFLAG_USE_ERRNO
if kw.pop("use_last_error", False):
flags |= _FUNCFLAG_USE_LASTERROR
if kw:
raise ValueError("unexpected keyword argument(s) %s" % kw.keys())
try:
return _win_functype_cache[(restype, argtypes, flags)]
except KeyError:
class WinFunctionType(_CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_flags_ = flags
_win_functype_cache[(restype, argtypes, flags)] = WinFunctionType
return WinFunctionType
if WINFUNCTYPE.__doc__:
WINFUNCTYPE.__doc__ = CFUNCTYPE.__doc__.replace("CFUNCTYPE", "WINFUNCTYPE")
elif _os.name == "posix":
from _ctypes import dlopen as _dlopen
from _ctypes import sizeof, byref, addressof, alignment, resize
from _ctypes import get_errno, set_errno
from _ctypes import _SimpleCData
def _check_size(typ, typecode=None):
    # Check sizeof(ctypes_type) against struct.calcsize. This
# should protect somewhat against a misconfigured libffi.
from struct import calcsize
if typecode is None:
# Most _type_ codes are the same as used in struct
typecode = typ._type_
actual, required = sizeof(typ), calcsize(typecode)
if actual != required:
raise SystemError("sizeof(%s) wrong: %d instead of %d" % \
(typ, actual, required))
class py_object(_SimpleCData):
_type_ = "O"
def __repr__(self):
try:
return super(py_object, self).__repr__()
except ValueError:
return "%s(<NULL>)" % type(self).__name__
_check_size(py_object, "P")
class c_short(_SimpleCData):
_type_ = "h"
_check_size(c_short)
class c_ushort(_SimpleCData):
_type_ = "H"
_check_size(c_ushort)
class c_long(_SimpleCData):
_type_ = "l"
_check_size(c_long)
class c_ulong(_SimpleCData):
_type_ = "L"
_check_size(c_ulong)
if _calcsize("i") == _calcsize("l"):
# if int and long have the same size, make c_int an alias for c_long
c_int = c_long
c_uint = c_ulong
else:
class c_int(_SimpleCData):
_type_ = "i"
_check_size(c_int)
class c_uint(_SimpleCData):
_type_ = "I"
_check_size(c_uint)
class c_float(_SimpleCData):
_type_ = "f"
_check_size(c_float)
class c_double(_SimpleCData):
_type_ = "d"
_check_size(c_double)
class c_longdouble(_SimpleCData):
_type_ = "g"
if sizeof(c_longdouble) == sizeof(c_double):
c_longdouble = c_double
if _calcsize("l") == _calcsize("q"):
# if long and long long have the same size, make c_longlong an alias for c_long
c_longlong = c_long
c_ulonglong = c_ulong
else:
class c_longlong(_SimpleCData):
_type_ = "q"
_check_size(c_longlong)
class c_ulonglong(_SimpleCData):
_type_ = "Q"
## def from_param(cls, val):
## return ('d', float(val), val)
## from_param = classmethod(from_param)
_check_size(c_ulonglong)
class c_ubyte(_SimpleCData):
_type_ = "B"
c_ubyte.__ctype_le__ = c_ubyte.__ctype_be__ = c_ubyte
# backward compatibility:
##c_uchar = c_ubyte
_check_size(c_ubyte)
class c_byte(_SimpleCData):
_type_ = "b"
c_byte.__ctype_le__ = c_byte.__ctype_be__ = c_byte
_check_size(c_byte)
class c_char(_SimpleCData):
_type_ = "c"
c_char.__ctype_le__ = c_char.__ctype_be__ = c_char
_check_size(c_char)
class c_char_p(_SimpleCData):
_type_ = "z"
if _os.name == "nt":
def __repr__(self):
if not windll.kernel32.IsBadStringPtrA(self, -1):
return "%s(%r)" % (self.__class__.__name__, self.value)
return "%s(%s)" % (self.__class__.__name__, cast(self, c_void_p).value)
else:
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, cast(self, c_void_p).value)
_check_size(c_char_p, "P")
class c_void_p(_SimpleCData):
_type_ = "P"
c_voidp = c_void_p # backwards compatibility (to a bug)
_check_size(c_void_p)
class c_bool(_SimpleCData):
_type_ = "?"
from _ctypes import POINTER, pointer, _pointer_type_cache
try:
from _ctypes import set_conversion_mode
except ImportError:
pass
else:
if _os.name in ("nt", "ce"):
set_conversion_mode("mbcs", "ignore")
else:
set_conversion_mode("ascii", "strict")
class c_wchar_p(_SimpleCData):
_type_ = "Z"
class c_wchar(_SimpleCData):
_type_ = "u"
POINTER(c_wchar).from_param = c_wchar_p.from_param #_SimpleCData.c_wchar_p_from_param
def create_unicode_buffer(init, size=None):
"""create_unicode_buffer(aString) -> character array
create_unicode_buffer(anInteger) -> character array
create_unicode_buffer(aString, anInteger) -> character array
"""
if isinstance(init, (str, unicode)):
if size is None:
size = len(init)+1
buftype = c_wchar * size
buf = buftype()
buf.value = init
return buf
elif isinstance(init, (int, long)):
buftype = c_wchar * init
buf = buftype()
return buf
raise TypeError(init)
POINTER(c_char).from_param = c_char_p.from_param #_SimpleCData.c_char_p_from_param
# XXX Deprecated
def SetPointerType(pointer, cls):
if _pointer_type_cache.get(cls, None) is not None:
raise RuntimeError("This type already exists in the cache")
if id(pointer) not in _pointer_type_cache:
raise RuntimeError("What's this???")
pointer.set_type(cls)
_pointer_type_cache[cls] = pointer
del _pointer_type_cache[id(pointer)]
# XXX Deprecated
def ARRAY(typ, len):
return typ * len
################################################################
class CDLL(object):
"""An instance of this class represents a loaded dll/shared
library, exporting functions using the standard C calling
convention (named 'cdecl' on Windows).
The exported functions can be accessed as attributes, or by
indexing with the function name. Examples:
<obj>.qsort -> callable object
<obj>['qsort'] -> callable object
Calling the functions releases the Python GIL during the call and
reacquires it afterwards.
"""
_func_flags_ = _FUNCFLAG_CDECL
_func_restype_ = c_int
def __init__(self, name, mode=DEFAULT_MODE, handle=None,
use_errno=False,
use_last_error=False):
self._name = name
flags = self._func_flags_
if use_errno:
flags |= _FUNCFLAG_USE_ERRNO
if use_last_error:
flags |= _FUNCFLAG_USE_LASTERROR
class _FuncPtr(_CFuncPtr):
_flags_ = flags
_restype_ = self._func_restype_
self._FuncPtr = _FuncPtr
if handle is None:
self._handle = _dlopen(self._name, mode)
else:
self._handle = handle
def __repr__(self):
return "<%s '%s', handle %x at %x>" % \
(self.__class__.__name__, self._name,
(self._handle & (_sys.maxint*2 + 1)),
id(self) & (_sys.maxint*2 + 1))
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
raise AttributeError(name)
func = self.__getitem__(name)
setattr(self, name, func)
return func
def __getitem__(self, name_or_ordinal):
func = self._FuncPtr((name_or_ordinal, self))
if not isinstance(name_or_ordinal, (int, long)):
func.__name__ = name_or_ordinal
return func
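# Illustrative sketch (not part of the original module): the two equivalent
# lookup styles mentioned in the CDLL docstring (attribute access vs. indexing
# by name). "libm.so.6" and cos() are assumptions for the example.
def _example_cdll_lookup():
    libm = CDLL("libm.so.6")
    cos_by_attr = libm.cos                     # cached on the instance by __getattr__
    cos_by_item = libm['cos']                  # a fresh _FuncPtr on every lookup
    cos_by_attr.restype = c_double
    cos_by_item.restype = c_double
    return cos_by_attr(c_double(0.0)), cos_by_item(c_double(0.0))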
class PyDLL(CDLL):
"""This class represents the Python library itself. It allows to
access Python API functions. The GIL is not released, and
Python exceptions are handled correctly.
"""
_func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
if _os.name in ("nt", "ce"):
class WinDLL(CDLL):
"""This class represents a dll exporting functions using the
Windows stdcall calling convention.
"""
_func_flags_ = _FUNCFLAG_STDCALL
# XXX Hm, what about HRESULT as normal parameter?
# Mustn't it derive from c_long then?
from _ctypes import _check_HRESULT, _SimpleCData
class HRESULT(_SimpleCData):
_type_ = "l"
# _check_retval_ is called with the function's result when it
# is used as restype. It checks for the FAILED bit, and
# raises a WindowsError if it is set.
#
# The _check_retval_ method is implemented in C, so that the
# method definition itself is not included in the traceback
# when it raises an error - that is what we want (and Python
# doesn't have a way to raise an exception in the caller's
# frame).
_check_retval_ = _check_HRESULT
class OleDLL(CDLL):
"""This class represents a dll exporting functions using the
Windows stdcall calling convention, and returning HRESULT.
HRESULT error values are automatically raised as WindowsError
exceptions.
"""
_func_flags_ = _FUNCFLAG_STDCALL
_func_restype_ = HRESULT
class LibraryLoader(object):
def __init__(self, dlltype):
self._dlltype = dlltype
def __getattr__(self, name):
if name[0] == '_':
raise AttributeError(name)
dll = self._dlltype(name)
setattr(self, name, dll)
return dll
def __getitem__(self, name):
return getattr(self, name)
def LoadLibrary(self, name):
return self._dlltype(name)
cdll = LibraryLoader(CDLL)
pydll = LibraryLoader(PyDLL)
if _os.name in ("nt", "ce"):
pythonapi = PyDLL("python dll", None, _sys.dllhandle)
elif _sys.platform == "cygwin":
pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2])
else:
pythonapi = PyDLL(None)
if _os.name in ("nt", "ce"):
windll = LibraryLoader(WinDLL)
oledll = LibraryLoader(OleDLL)
if _os.name == "nt":
GetLastError = windll.kernel32.GetLastError
else:
GetLastError = windll.coredll.GetLastError
from _ctypes import get_last_error, set_last_error
def WinError(code=None, descr=None):
if code is None:
code = GetLastError()
if descr is None:
descr = FormatError(code).strip()
return WindowsError(code, descr)
_pointer_type_cache[None] = c_void_p
if sizeof(c_uint) == sizeof(c_void_p):
c_size_t = c_uint
elif sizeof(c_ulong) == sizeof(c_void_p):
c_size_t = c_ulong
elif sizeof(c_ulonglong) == sizeof(c_void_p):
c_size_t = c_ulonglong
# functions
from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, _cast_addr
## void *memmove(void *, const void *, size_t);
memmove = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)(_memmove_addr)
## void *memset(void *, int, size_t)
memset = CFUNCTYPE(c_void_p, c_void_p, c_int, c_size_t)(_memset_addr)
def PYFUNCTYPE(restype, *argtypes):
class CFunctionType(_CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
return CFunctionType
_cast = PYFUNCTYPE(py_object, c_void_p, py_object, py_object)(_cast_addr)
def cast(obj, typ):
return _cast(obj, obj, typ)
_string_at = PYFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr)
def string_at(ptr, size=-1):
"""string_at(addr[, size]) -> string
Return the string at addr."""
return _string_at(ptr, size)
try:
from _ctypes import _wstring_at_addr
except ImportError:
pass
else:
_wstring_at = PYFUNCTYPE(py_object, c_void_p, c_int)(_wstring_at_addr)
def wstring_at(ptr, size=-1):
"""wstring_at(addr[, size]) -> string
Return the string at addr."""
return _wstring_at(ptr, size)
if _os.name in ("nt", "ce"): # COM stuff
def DllGetClassObject(rclsid, riid, ppv):
try:
ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
except ImportError:
return -2147221231 # CLASS_E_CLASSNOTAVAILABLE
else:
return ccom.DllGetClassObject(rclsid, riid, ppv)
def DllCanUnloadNow():
try:
ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
except ImportError:
return 0 # S_OK
return ccom.DllCanUnloadNow()
from ctypes._endian import BigEndianStructure, LittleEndianStructure
# Fill in specifically-sized types
c_int8 = c_byte
c_uint8 = c_ubyte
for kind in [c_short, c_int, c_long, c_longlong]:
if sizeof(kind) == 2: c_int16 = kind
elif sizeof(kind) == 4: c_int32 = kind
elif sizeof(kind) == 8: c_int64 = kind
for kind in [c_ushort, c_uint, c_ulong, c_ulonglong]:
if sizeof(kind) == 2: c_uint16 = kind
elif sizeof(kind) == 4: c_uint32 = kind
elif sizeof(kind) == 8: c_uint64 = kind
del(kind)
# XXX for whatever reasons, creating the first instance of a callback
# function is needed for the unittests on Win64 to succeed. This MAY
# be a compiler bug, since the problem occurs only when _ctypes is
# compiled with the MS SDK compiler. Or an uninitialized variable?
CFUNCTYPE(c_int)(lambda: None)
| apache-2.0 |
decvalts/landlab | landlab/components/flow_accum/flow_accumulation2.py | 1 | 4383 | #! /usr/env/python
"""
A python flow accumulation module. It is designed to be general, and to operate across multiple grids and multiple flow direction patterns. However, at the moment, only a steepest descent (single path) routing scheme is implemented.
There remain some outstanding issues with the handling of boundary cells, which this component has inherited from flow_routing_D8.
Created DEJH, 8/2013
"""
from __future__ import print_function
import landlab
from landlab import ModelParameterDictionary
import numpy as np
class AccumFlow(object):
"""
This class allows the routing of flow around a landscape according to a previously calculated flow direction vector. It is not sensitive to grid type. It will eventually be able to work with discharges which are split across more than one node, but at the moment, assumes a single line of descent for a given node.
"""
def __init__(self, grid):
self.initialize(grid)
def initialize(self, grid):
self.grid = grid
##create and initial grid if one doesn't already exist
#if self.grid==None:
# self.grid = create_and_initialize_grid(input_stream)
self.flow_accum_by_area = np.zeros(grid.number_of_nodes+1) #prefilled with zeros, size of WHOLE grid+1, to allow -1 ids
def calc_flowacc(self, grid, z, flowdirs):
active_cell_ids = grid.get_active_cell_node_ids()
#Perform test to see if the flowdir data is a single vector, or multidimensional, here. Several ways possible: 1. Is the vector multidimensional?, e.g., try: data.flowdirs.shape[1] 2. set a flag in flowdir.
try:
height_order_active_cells = np.argsort(z[active_cell_ids])[::-1] #descending order
except:
print('Cells could not be sorted by elevation. Does the data object contain the elevation vector?')
try:
sorted_flowdirs = (flowdirs[active_cell_ids])[height_order_active_cells]
except:
print('Flow directions could not be sorted by elevation. Does the data object contain the flow direction vector?')
#print grid.cell_areas
self.flow_accum_by_area[active_cell_ids] = grid.cell_areas #This is only the active nodes == cells by definition
#print len(height_order_active_cells), len(sorted_flowdirs), len(self.flow_accum_by_area)
#print height_order_active_cells
#print sorted_flowdirs
#print data.flowdirs
#print self.flow_accum_by_area.reshape(5,5)
#---
# Two ways of routing flow are provided. All route flow in descending height order.
        #The first, using weave, is not working due to an installation-dependent issue with the C++ compiler weave uses. However, it will be a massive improvement over other methods.
#The second is an inefficient but functional looped method.
#cpp_code_fragment = """
#printf ('Test');
#"""
#Shouldn't need to return_val, as we're handling mutable objects not ints
# flow_accum_by_area = self.flow_accum_by_area
# a=1.
# weave.inline(cpp_code_fragment, ['a'], compiler='gcc') #['flow_accum_by_area', 'height_order_active_cells', 'sorted_flowdirs', 'active_cell_ids']) #,verbose=2, compiler='gcc')
#---
##inefficient Python code to mimic the above weave:
for i in xrange(len(sorted_flowdirs)):
iter_height_order = height_order_active_cells[i]
iter_sorted_fldirs = sorted_flowdirs[i]
self.flow_accum_by_area[iter_sorted_fldirs] += (self.flow_accum_by_area[active_cell_ids])[iter_height_order]
return self.flow_accum_by_area[:-1]
#int downhill_node;
#int active_node;
#PyObject *active_node_array[(Nactive_cell_ids[0])];
#for (int i=0; i<(Nactive_cell_ids[0]); i++)
# {
# active_node_array[i] = &(flow_accum_by_area[(active_cell_ids[i])]);
# }
#
#for (int i=0; i<(Nsorted_flowdirs[0]); i++)
#{
# downhill_node = sorted_flowdirs[i];
# active_node = height_order_active_cells[i];
# if downhill_node != active_cell_ids[active_node]
# {
# flow_accum_by_area[downhill_node] += *(active_node_array[active_node]);
# }
#}
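# Illustrative sketch (not part of the original component): the same
# "descending height order" accumulation idea used by calc_flowacc above,
# shown on a toy 1-D set of nodes. The elevations, flow directions and unit
# cell areas are assumptions made up for the example.
def _example_accumulate_descending():
    import numpy as np
    z = np.array([3.0, 2.0, 1.0, 0.5])         # elevations, node 0 is highest
    flowdirs = np.array([1, 2, 3, 3])          # id of the node each node drains to
    accum = np.ones(4)                         # start from the cell areas
    for node in np.argsort(z)[::-1]:           # visit nodes from high to low
        receiver = flowdirs[node]
        if receiver != node:                   # nodes draining to themselves are outlets
            accum[receiver] += accum[node]
    return accum                               # array([1., 2., 3., 4.])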
| mit |
koery/win-sublime | Data/Packages/Package Control/package_control/providers/schema_compat.py | 2 | 1463 | from ..download_manager import update_url
def platforms_to_releases(info, debug):
"""
Accepts a dict from a schema version 1.0, 1.1 or 1.2 package containing
a "platforms" key and converts it to a list of releases compatible with'
schema version 2.0.
:param info:
The dict of package info
:param debug:
If debug information should be shown
:return:
A list of release dicts
"""
output = []
temp_releases = {}
platforms = info.get('platforms')
for platform in platforms:
for release in platforms[platform]:
key = '%s-%s' % (release['version'], release['url'])
if not key in temp_releases:
temp_releases[key] = {
'sublime_text': '<3000',
'version': release['version'],
'date': info.get('last_modified', '2011-08-01 00:00:00'),
'url': update_url(release['url'], debug),
'platforms': []
}
if platform == '*':
temp_releases[key]['platforms'] = ['*']
elif temp_releases[key]['platforms'] != ['*']:
temp_releases[key]['platforms'].append(platform)
for key in temp_releases:
release = temp_releases[key]
if release['platforms'] == ['windows', 'linux', 'osx']:
release['platforms'] = ['*']
output.append(release)
return output
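# Illustrative sketch (not part of the original module): a minimal schema 1.x
# "platforms" dict and the call that converts it. The URL, version and date are
# assumptions made up for the example; identical windows/linux/osx releases
# collapse into a single '*' release, as in the loop above.
def _example_platforms_to_releases():
    info = {
        'last_modified': '2014-01-01 00:00:00',
        'platforms': {
            'windows': [{'version': '1.0.0', 'url': 'http://example.com/pkg.zip'}],
            'linux': [{'version': '1.0.0', 'url': 'http://example.com/pkg.zip'}],
            'osx': [{'version': '1.0.0', 'url': 'http://example.com/pkg.zip'}],
        },
    }
    return platforms_to_releases(info, debug=False)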
| mit |
DataONEorg/d1_python | lib_client/src/d1_client/iter/sysmeta_multi.py | 1 | 2823 | # This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import d1_common.const
import d1_common.xml
import d1_client.iter.base_multi
logger = logging.getLogger(__name__)
# fmt: off
class SystemMetadataIteratorMulti(d1_client.iter.base_multi.MultiprocessedIteratorBase):
# language=rst
"""Multiprocessed SystemMetadata Iterator.
Iterate over SystemMetadata XML objects, describing Science Objects available on Member
Nodes.
This is a multiprocessed implementation. See :ref:`d1_client/ref/iterators:DataONE
Iterators` for an overview of the available iterator types and implementations.
"""
def __init__(
self,
base_url=d1_common.const.URL_DATAONE_ROOT,
page_size=d1_client.iter.base_multi.PAGE_SIZE,
max_workers=d1_client.iter.base_multi.MAX_WORKERS,
max_result_queue_size=d1_client.iter.base_multi.MAX_RESULT_QUEUE_SIZE,
max_task_queue_size=d1_client.iter.base_multi.MAX_TASK_QUEUE_SIZE,
api_major=d1_client.iter.base_multi.API_MAJOR,
client_arg_dict=None,
list_objects_arg_dict=None,
get_system_metadata_arg_dict=None,
):
super(SystemMetadataIteratorMulti, self).__init__(
base_url, page_size, max_workers, max_result_queue_size,
max_task_queue_size, api_major, client_arg_dict, list_objects_arg_dict,
get_system_metadata_arg_dict, _page_func, _iter_func, _item_proc_func
)
def _page_func(client):
return client.listObjects
def _iter_func(page_pyxb):
return page_pyxb.objectInfo
def _item_proc_func(client, item_pyxb, get_system_metadata_arg_dict):
pid = d1_common.xml.get_req_val(item_pyxb.identifier)
    logger.debug('Retrieving System Metadata. pid="{}"'.format(pid))
try:
return client.getSystemMetadata(pid, get_system_metadata_arg_dict)
except Exception as e:
logger.error(
'Unable to retrieve System Metadata. pid="{}" error="{}"'.format(
pid, str(e)
)
)
return {"pid": pid, "error": e.name}
| apache-2.0 |
nanolearning/edx-platform | cms/djangoapps/contentstore/features/course-export.py | 3 | 2228 | # pylint: disable=C0111
# pylint: disable=W0621
# pylint: disable=W0613
from lettuce import world, step
from component_settings_editor_helpers import enter_xml_in_advanced_problem
from nose.tools import assert_true, assert_equal
from xmodule.modulestore.locations import SlashSeparatedCourseKey
from contentstore.utils import reverse_usage_url
@step('I go to the export page$')
def i_go_to_the_export_page(step):
world.click_tools()
link_css = 'li.nav-course-tools-export a'
world.css_click(link_css)
@step('I export the course$')
def i_export_the_course(step):
step.given('I go to the export page')
world.css_click('a.action-export')
@step('I edit and enter bad XML$')
def i_enter_bad_xml(step):
enter_xml_in_advanced_problem(step,
"""<problem><h1>Smallest Canvas</h1>
<p>You want to make the smallest canvas you can.</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false"><verbatim><canvas id="myCanvas" width = 10 height = 100> </canvas></verbatim></choice>
<choice correct="true"><code><canvas id="myCanvas" width = 10 height = 10> </canvas></code></choice>
</choicegroup>
</multiplechoiceresponse>
</problem>"""
)
@step('I edit and enter an ampersand$')
def i_enter_an_ampersand(step):
enter_xml_in_advanced_problem(step, "<problem>&</problem>")
@step('I get an error dialog$')
def get_an_error_dialog(step):
assert_true(world.is_css_present("div.prompt.error"))
@step('I can click to go to the unit with the error$')
def i_click_on_error_dialog(step):
world.click_link_by_text('Correct failed component')
assert_true(world.css_html("span.inline-error").startswith("Problem i4x://MITx/999/problem"))
course_key = SlashSeparatedCourseKey("MITx", "999", "Robot_Super_Course")
# we don't know the actual ID of the vertical. So just check that we did go to a
# vertical page in the course (there should only be one).
vertical_usage_key = course_key.make_usage_key("vertical", "")
vertical_url = reverse_usage_url('unit_handler', vertical_usage_key)
assert_equal(1, world.browser.url.count(vertical_url))
| agpl-3.0 |
owaiskhan/Retransmission-Combining | gnuradio-examples/python/pfb/resampler.py | 7 | 4207 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys
try:
import scipy
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class mytb(gr.top_block):
def __init__(self, fs_in, fs_out, fc, N=10000):
gr.top_block.__init__(self)
rerate = float(fs_out) / float(fs_in)
print "Resampling from %f to %f by %f " %(fs_in, fs_out, rerate)
# Creating our own taps
taps = gr.firdes.low_pass_2(32, 32, 0.25, 0.1, 80)
self.src = gr.sig_source_c(fs_in, gr.GR_SIN_WAVE, fc, 1)
#self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
self.head = gr.head(gr.sizeof_gr_complex, N)
# A resampler with our taps
self.resamp_0 = blks2.pfb_arb_resampler_ccf(rerate, taps,
flt_size=32)
# A resampler that just needs a resampling rate.
# Filter is created for us and designed to cover
# entire bandwidth of the input signal.
# An optional atten=XX rate can be used here to
# specify the out-of-band rejection (default=80).
self.resamp_1 = blks2.pfb_arb_resampler_ccf(rerate)
self.snk_in = gr.vector_sink_c()
self.snk_0 = gr.vector_sink_c()
self.snk_1 = gr.vector_sink_c()
self.connect(self.src, self.head, self.snk_in)
self.connect(self.head, self.resamp_0, self.snk_0)
self.connect(self.head, self.resamp_1, self.snk_1)
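# Illustrative sketch (not part of the original example): building the
# auto-designed resampler with an explicit out-of-band rejection, as the
# comment above mentions ("atten=XX", default 80). The 60 dB figure and the
# helper name are assumptions made up for the example.
def _example_resampler_with_atten(fs_in=8000.0, fs_out=20000.0):
    rerate = float(fs_out) / float(fs_in)      # 2.5 for these sample rates
    return blks2.pfb_arb_resampler_ccf(rerate, atten=60)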
def main():
fs_in = 8000
fs_out = 20000
fc = 1000
N = 10000
tb = mytb(fs_in, fs_out, fc, N)
tb.run()
# Plot PSD of signals
nfftsize = 2048
fig1 = pylab.figure(1, figsize=(10,10), facecolor="w")
sp1 = fig1.add_subplot(2,1,1)
sp1.psd(tb.snk_in.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_in)
sp1.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp1.set_xlim([-fs_in/2, fs_in/2])
sp2 = fig1.add_subplot(2,1,2)
sp2.psd(tb.snk_0.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With our filter")
sp2.psd(tb.snk_1.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With auto-generated filter")
sp2.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
sp2.set_xlim([-fs_out/2, fs_out/2])
sp2.legend()
# Plot signals in time
Ts_in = 1.0/fs_in
Ts_out = 1.0/fs_out
t_in = scipy.arange(0, len(tb.snk_in.data())*Ts_in, Ts_in)
t_out = scipy.arange(0, len(tb.snk_0.data())*Ts_out, Ts_out)
fig2 = pylab.figure(2, figsize=(10,10), facecolor="w")
sp21 = fig2.add_subplot(2,1,1)
sp21.plot(t_in, tb.snk_in.data())
sp21.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp21.set_xlim([t_in[100], t_in[200]])
sp22 = fig2.add_subplot(2,1,2)
sp22.plot(t_out, tb.snk_0.data(),
label="With our filter")
sp22.plot(t_out, tb.snk_1.data(),
label="With auto-generated filter")
sp22.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
r = float(fs_out)/float(fs_in)
sp22.set_xlim([t_out[r * 100], t_out[r * 200]])
sp22.legend()
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
proxysh/Safejumper-for-Mac | buildmac/Resources/env/lib/python2.7/site-packages/twisted/web/http_headers.py | 14 | 8231 | # -*- test-case-name: twisted.web.test.test_http_headers -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An API for storing HTTP header names and values.
"""
from __future__ import division, absolute_import
from twisted.python.compat import comparable, cmp, unicode
def _dashCapitalize(name):
"""
Return a byte string which is capitalized using '-' as a word separator.
@param name: The name of the header to capitalize.
@type name: L{bytes}
@return: The given header capitalized using '-' as a word separator.
@rtype: L{bytes}
"""
return b'-'.join([word.capitalize() for word in name.split(b'-')])
@comparable
class Headers(object):
"""
Stores HTTP headers in a key and multiple value format.
Most methods accept L{bytes} and L{unicode}, with an internal L{bytes}
representation. When passed L{unicode}, header names (e.g. 'Content-Type')
are encoded using ISO-8859-1 and header values (e.g.
'text/html;charset=utf-8') are encoded using UTF-8. Some methods that return
values will return them in the same type as the name given.
If the header keys or values cannot be encoded or decoded using the rules
above, using just L{bytes} arguments to the methods of this class will
ensure no decoding or encoding is done, and L{Headers} will treat the keys
and values as opaque byte strings.
@cvar _caseMappings: A L{dict} that maps lowercase header names
to their canonicalized representation.
@ivar _rawHeaders: A L{dict} mapping header names as L{bytes} to L{list}s of
header values as L{bytes}.
"""
_caseMappings = {
b'content-md5': b'Content-MD5',
b'dnt': b'DNT',
b'etag': b'ETag',
b'p3p': b'P3P',
b'te': b'TE',
b'www-authenticate': b'WWW-Authenticate',
b'x-xss-protection': b'X-XSS-Protection'}
def __init__(self, rawHeaders=None):
self._rawHeaders = {}
if rawHeaders is not None:
for name, values in rawHeaders.items():
self.setRawHeaders(name, values)
def __repr__(self):
"""
Return a string fully describing the headers set on this object.
"""
return '%s(%r)' % (self.__class__.__name__, self._rawHeaders,)
def __cmp__(self, other):
"""
Define L{Headers} instances as being equal to each other if they have
the same raw headers.
"""
if isinstance(other, Headers):
return cmp(
sorted(self._rawHeaders.items()),
sorted(other._rawHeaders.items()))
return NotImplemented
def _encodeName(self, name):
"""
Encode the name of a header (eg 'Content-Type') to an ISO-8859-1 encoded
bytestring if required.
@param name: A HTTP header name
@type name: L{unicode} or L{bytes}
@return: C{name}, encoded if required, lowercased
@rtype: L{bytes}
"""
if isinstance(name, unicode):
return name.lower().encode('iso-8859-1')
return name.lower()
def _encodeValue(self, value):
"""
Encode a single header value to a UTF-8 encoded bytestring if required.
@param value: A single HTTP header value.
@type value: L{bytes} or L{unicode}
@return: C{value}, encoded if required
@rtype: L{bytes}
"""
if isinstance(value, unicode):
return value.encode('utf8')
return value
def _encodeValues(self, values):
"""
Encode a L{list} of header values to a L{list} of UTF-8 encoded
bytestrings if required.
@param values: A list of HTTP header values.
@type values: L{list} of L{bytes} or L{unicode} (mixed types allowed)
@return: C{values}, with each item encoded if required
@rtype: L{list} of L{bytes}
"""
newValues = []
for value in values:
newValues.append(self._encodeValue(value))
return newValues
def _decodeValues(self, values):
"""
Decode a L{list} of header values into a L{list} of Unicode strings.
@param values: A list of HTTP header values.
@type values: L{list} of UTF-8 encoded L{bytes}
@return: C{values}, with each item decoded
@rtype: L{list} of L{unicode}
"""
newValues = []
for value in values:
newValues.append(value.decode('utf8'))
return newValues
def copy(self):
"""
Return a copy of itself with the same headers set.
@return: A new L{Headers}
"""
return self.__class__(self._rawHeaders)
def hasHeader(self, name):
"""
Check for the existence of a given header.
@type name: L{bytes} or L{unicode}
@param name: The name of the HTTP header to check for.
@rtype: L{bool}
@return: C{True} if the header exists, otherwise C{False}.
"""
return self._encodeName(name) in self._rawHeaders
def removeHeader(self, name):
"""
Remove the named header from this header object.
@type name: L{bytes} or L{unicode}
@param name: The name of the HTTP header to remove.
@return: L{None}
"""
self._rawHeaders.pop(self._encodeName(name), None)
def setRawHeaders(self, name, values):
"""
Sets the raw representation of the given header.
@type name: L{bytes} or L{unicode}
@param name: The name of the HTTP header to set the values for.
@type values: L{list} of L{bytes} or L{unicode} strings
@param values: A list of strings each one being a header value of
the given name.
@return: L{None}
"""
if not isinstance(values, list):
raise TypeError("Header entry %r should be list but found "
"instance of %r instead" % (name, type(values)))
name = self._encodeName(name)
self._rawHeaders[name] = self._encodeValues(values)
def addRawHeader(self, name, value):
"""
Add a new raw value for the given header.
@type name: L{bytes} or L{unicode}
@param name: The name of the header for which to set the value.
@type value: L{bytes} or L{unicode}
@param value: The value to set for the named header.
"""
values = self.getRawHeaders(name)
if values is not None:
values.append(value)
else:
values = [value]
self.setRawHeaders(name, values)
def getRawHeaders(self, name, default=None):
"""
Returns a list of headers matching the given name as the raw string
given.
@type name: L{bytes} or L{unicode}
@param name: The name of the HTTP header to get the values of.
@param default: The value to return if no header with the given C{name}
exists.
@rtype: L{list} of strings, same type as C{name} (except when
C{default} is returned).
@return: If the named header is present, a L{list} of its
values. Otherwise, C{default}.
"""
encodedName = self._encodeName(name)
values = self._rawHeaders.get(encodedName, default)
if isinstance(name, unicode) and values is not default:
return self._decodeValues(values)
return values
def getAllRawHeaders(self):
"""
Return an iterator of key, value pairs of all headers contained in this
object, as L{bytes}. The keys are capitalized in canonical
capitalization.
"""
for k, v in self._rawHeaders.items():
yield self._canonicalNameCaps(k), v
def _canonicalNameCaps(self, name):
"""
Return the canonical name for the given header.
@type name: L{bytes}
@param name: The all-lowercase header name to capitalize in its
canonical form.
@rtype: L{bytes}
@return: The canonical name of the header.
"""
return self._caseMappings.get(name, _dashCapitalize(name))
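# Illustrative sketch (not part of the original module): the bytes/unicode
# behaviour described in the class docstring. The header values are assumptions
# made up for the example.
def _example_headers_usage():
    h = Headers({b'content-type': [b'text/html']})
    h.addRawHeader(u'X-Example', u'value')     # encoded to bytes internally
    raw_bytes = h.getRawHeaders(b'x-example')  # -> [b'value']
    raw_text = h.getRawHeaders(u'x-example')   # -> [u'value'], same type as the name
    return raw_bytes, raw_text, list(h.getAllRawHeaders())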
__all__ = ['Headers']
| gpl-2.0 |
lucasa/landell-fgdp | sltv/ui/output/icecastoutput.py | 5 | 2588 | # -*- coding: utf-8 -*-
# Copyright (C) 2010 Holoscópio Tecnologia
# Author: Luciana Fujii Pontello <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gobject
import gtk
from sltv.settings import UI_DIR
from core import OutputUI
class IcecastOutputUI(OutputUI):
def __init__(self):
OutputUI.__init__(self)
self.interface.add_from_file(UI_DIR + "/output/icecastoutput.ui")
self.server_entry = self.interface.get_object("server_entry")
self.user_entry = self.interface.get_object("user_entry")
self.port_spinbutton = self.interface.get_object("port_spinbutton")
self.password_entry = self.interface.get_object("password_entry")
self.mount_point_entry = self.interface.get_object("mount_point_entry")
self.box = self.interface.get_object("icecast_box")
self.config["location"] = ""
def set_filename(self, button):
self.config["location"] = button.get_filename()
def get_widget(self):
return self.box
def get_name(self):
return "Icecast"
def get_description(self):
return "Output video to Icecast"
def update_config(self):
self.server_entry.set_text(self.config["ip"])
self.user_entry.set_text(self.config["username"])
self.password_entry.set_text(self.config["password"])
self.port_spinbutton.set_value(float(self.config["port"]))
self.mount_point_entry.set_text(self.config["mount"])
OutputUI.update_config(self)
def get_config(self):
self.config["ip"] = self.server_entry.get_text()
self.config["username"] = self.user_entry.get_text()
self.config["password"] = self.password_entry.get_text()
self.config["port"] = int(self.port_spinbutton.get_value())
self.config["mount"] = self.mount_point_entry.get_text()
OutputUI.get_config(self)
return self.config
| gpl-2.0 |
ttfseiko/openerp-trunk | openerp/addons/mrp_byproduct/__openerp__.py | 121 | 1847 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'MRP Byproducts',
'version': '1.0',
'category': 'Manufacturing',
'description': """
This module allows you to produce several products from one production order.
=============================================================================
You can configure by-products in the bill of material.
Without this module:
--------------------
A + B + C -> D
With this module:
-----------------
A + B + C -> D + E
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/bom_byproduct.jpeg'],
'depends': ['base', 'mrp'],
'data': [
'security/ir.model.access.csv',
'mrp_byproduct_view.xml'
],
'demo': [],
'test': ['test/mrp_byproduct.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
our-city-app/oca-backend | src/rogerthat/dal/service.py | 1 | 17053 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import json
import logging
import time
from types import NoneType
from google.appengine.ext import db, ndb
from typing import List
from mcfw.cache import cached
from mcfw.properties import azzert
from mcfw.rpc import arguments, returns
from rogerthat.dal import parent_key, generator
from rogerthat.models import APIKey, SIKKey, FriendServiceIdentityConnection, ServiceInteractionDef, MFRSIKey, \
ServiceMenuDef, ServiceIdentity, QRTemplate, ServiceIdentityStatistic, ServiceProfile, \
NdbServiceMenuDef
from rogerthat.rpc import users, rpc
from rogerthat.rpc.models import ServiceLog, ServiceAPICallback, NdbServiceLog
from rogerthat.utils import now
from rogerthat.utils.service import create_service_identity_user, get_service_user_from_service_identity_user
@returns([ServiceAPICallback])
@arguments(service_user=users.User)
def get_service_api_callback_records(service_user):
qry = ServiceAPICallback.gql("WHERE ANCESTOR IS :ancestor")
qry.bind(ancestor=parent_key(service_user))
return generator(qry.run())
@returns(db.GqlQuery)
@arguments(service_user=users.User)
def get_service_api_callback_records_query(service_user):
qry = ServiceAPICallback.gql("WHERE ANCESTOR IS :ancestor")
qry.bind(ancestor=parent_key(service_user))
return qry
@returns([APIKey])
@arguments(service_user=users.User)
def get_api_keys(service_user):
return APIKey.all().filter("user =", service_user).filter("mfr =", False)
@returns(long)
@arguments(service_user=users.User)
def get_api_key_count(service_user):
return APIKey.all().filter("user =", service_user).filter("mfr =", False).count()
@cached(1, request=True, memcache=True)
@returns(APIKey)
@arguments(service_user=users.User)
def get_mfr_api_key(service_user):
return APIKey.all().filter("user =", service_user).filter("mfr =", True).get()
@cached(1, request=True, memcache=True)
@returns(APIKey)
@arguments(key=unicode)
def get_api_key(key):
return APIKey.get_by_key_name(key)
@cached(1, request=True, memcache=True)
@returns(SIKKey)
@arguments(key=unicode)
def get_sik(key):
return SIKKey.get_by_key_name(key)
@cached(1, request=True, memcache=True, lifetime=0)
@returns(MFRSIKey)
@arguments(service_user=users.User)
def get_mfr_sik(service_user):
key = service_user.email()
parent = parent_key(service_user)
return MFRSIKey.get_by_key_name(key, parent=parent)
@returns(FriendServiceIdentityConnection)
@arguments(friend_user=users.User, service_identity_user=users.User)
def get_friend_serviceidentity_connection(friend_user, service_identity_user):
return db.get(FriendServiceIdentityConnection.createKey(friend_user, service_identity_user))
def _limit_request_data(request, method):
if not request:
return request
length = len(request)
if length < 900 * 1024:
return request
if method not in ('system.store_branding', 'system.store_pdf_branding', 'system.import_service'):
logging.error('Large api call size (%s kB) for method %s', length / 1024, method)
return json.dumps(_limit_size(json.loads(request)))
def _limit_size(obj):
if isinstance(obj, list):
return map(_limit_size, obj)
elif isinstance(obj, dict):
return {key: _limit_size(value) for key, value in obj.iteritems()}
elif hasattr(obj, '__len__') and len(obj) > 200 * 1024:
return '[long content omitted]'
return obj
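# Illustrative sketch of the truncation above (values are made up; any value
# with a length over 200 KiB is replaced by a short placeholder):
#
#     _limit_size({'small': 'ok', 'blob': 'x' * (300 * 1024)})
#     # -> {'small': 'ok', 'blob': '[long content omitted]'}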
@returns(NoneType)
@arguments(service_user=users.User, rpc_id=unicode, type_=int, status=int, function=unicode, request=unicode,
response=unicode, error_code=int, error_message=unicode)
def log_service_activity(service_user, rpc_id, type_, status, function, request, response, error_code=0,
error_message=None):
request = _limit_request_data(request, function)
if ndb.in_transaction():
rpc.rpc_items.append(
NdbServiceLog(parent=ndb.Key(u'ServiceLogParent', rpc_id), user=service_user, type=type_,
status=status, function=function, request=request, response=response,
timestamp=int(time.time() * 1000), error_code=error_code,
error_message=error_message).put_async(),
_log_service_activity_deferred, service_user, rpc_id, type_, status, function, request, response,
error_code, error_message)
else:
rpc.rpc_items.append(
db.put_async(
ServiceLog(parent=db.Key.from_path(u'ServiceLogParent', rpc_id), user=service_user, type=type_,
status=status, function=function, request=request, response=response,
timestamp=int(time.time() * 1000), error_code=error_code, error_message=error_message)),
_log_service_activity_deferred, service_user, rpc_id, type_, status, function, request, response,
error_code, error_message)
def _log_service_activity_deferred(service_user, rpc_id, type_, status, function, request, response, error_code,
error_message):
ServiceLog(parent=db.Key.from_path(u'ServiceLogParent', rpc_id), user=service_user, type=type_, status=status,
function=function, request=request, response=response, timestamp=int(time.time() * 1000),
error_code=error_code, error_message=error_message).put()
@returns([ServiceLog])
@arguments(service_user=users.User, timestamp=long)
def get_service_log(service_user, timestamp):
if timestamp == 0:
timestamp = (now() + 1) * 1000
qry = ServiceLog.gql("WHERE user = :user AND timestamp < :timestamp ORDER BY timestamp DESC")
qry.bind(user=service_user, timestamp=timestamp)
return qry.fetch(100)
@returns(db.Query)
@arguments(service_identity_user=users.User, app_id=unicode)
def get_friend_service_identity_connections_of_service_identity_query(service_identity_user, app_id=None):
azzert('/' in service_identity_user.email(), 'no slash in %s' % service_identity_user.email())
if app_id:
return FriendServiceIdentityConnection.list_by_app_id(service_identity_user.email(), app_id)
else:
return FriendServiceIdentityConnection.list(service_identity_user.email())
def get_friend_service_identity_connections_of_service_identity_keys_query(service_identity_user):
# type: (users.User) -> db.Query
azzert('/' in service_identity_user.email(), 'no slash in %s' % service_identity_user.email())
return FriendServiceIdentityConnection.list(service_identity_user.email(), keys_only=True)
def get_friend_service_identity_connections_keys_of_app_user_query(app_user):
# type: (users.User) -> db.Query
app_user_email = app_user.email()
azzert('/' not in app_user_email, 'no slash expected in %s' % app_user_email)
return FriendServiceIdentityConnection.list_by_app_user(app_user, keys_only=True)
@returns(tuple)
@arguments(service_identity_user=users.User, cursor=unicode, count=int, app_id=unicode)
def get_users_connected_to_service_identity(service_identity_user, cursor, count=50, app_id=None):
qry = get_friend_service_identity_connections_of_service_identity_query(service_identity_user, app_id)
qry.with_cursor(cursor if cursor else None, None)
connections = qry.fetch(count)
return connections, qry.cursor() if len(connections) > 0 else ""
@returns(int)
@arguments(service_identity_user=users.User)
def count_users_connected_to_service_identity(service_identity_user):
return get_friend_service_identity_connections_of_service_identity_keys_query(service_identity_user).count(1000000)
@returns(db.GqlQuery)
@arguments(service_user=users.User)
def get_all_service_friend_keys_query(service_user):
"""Returns a query that results in all FriendServiceIdentityConnection of a service and all its identities."""
# service_user can be a service_identity_user
email = get_service_user_from_service_identity_user(service_user).email() + '/'
qry = db.GqlQuery("SELECT __key__ FROM FriendServiceIdentityConnection"
" WHERE service_identity_email >= :from_service_identity_email"
" AND service_identity_email < :to_service_identity_email")
qry.bind(from_service_identity_email=email, to_service_identity_email=email + u"\ufffd")
return qry
@returns(db.GqlQuery)
@arguments(service_user=users.User, app_user=users.User)
def get_friend_service_identity_connections_keys_query(service_user, app_user):
"""Returns a query that results in all FriendServiceIdentityConnection between a service and a user."""
email = service_user.email()
qry = db.GqlQuery("SELECT __key__ FROM FriendServiceIdentityConnection"
" WHERE ANCESTOR is :ancestor"
" AND service_identity_email >= :from_service_identity_email"
" AND service_identity_email < :to_service_identity_email")
qry.bind(ancestor=parent_key(app_user),
from_service_identity_email=email + '/',
to_service_identity_email=email + u"/\ufffd")
return qry
@returns(db.GqlQuery)
@arguments(service_identity_user=users.User, app_user=users.User)
def get_one_friend_service_identity_connection_keys_query(service_identity_user, app_user):
"""Returns a query that results in a FriendServiceIdentityConnection between a service identity and a user."""
service_identity_email = service_identity_user.email()
qry = db.GqlQuery("SELECT __key__ FROM FriendServiceIdentityConnection"
" WHERE ANCESTOR is :ancestor"
" AND service_identity_email = :service_identity_email")
qry.bind(ancestor=parent_key(app_user),
service_identity_email=service_identity_email)
return qry
@returns(ServiceInteractionDef)
@arguments(service_user=users.User, sid=(int, long))
def get_service_interaction_def(service_user, sid):
return ServiceInteractionDef.get_by_id(sid, parent_key(service_user))
@returns(dict)
@arguments(service_user=users.User, identifier=unicode, cursor=unicode, include_deleted=bool)
def get_service_interaction_defs(service_user, identifier, cursor, include_deleted=False):
if identifier is None:
if include_deleted:
qry_string = "WHERE ANCESTOR IS :ancestor ORDER BY timestamp DESC"
else:
qry_string = "WHERE ANCESTOR IS :ancestor AND deleted = FALSE ORDER BY timestamp DESC"
qry = ServiceInteractionDef.gql(qry_string)
qry.bind(ancestor=parent_key(service_user))
else:
if include_deleted:
qry_string = "WHERE ANCESTOR IS :ancestor AND service_identity = :identifier ORDER BY timestamp DESC"
else:
qry_string = "WHERE ANCESTOR IS :ancestor AND service_identity = :identifier AND deleted = FALSE ORDER BY timestamp DESC"
qry = ServiceInteractionDef.gql(qry_string)
qry.bind(identifier=identifier, ancestor=parent_key(service_user))
qry.with_cursor(cursor if cursor else None, None)
defs = qry.fetch(10)
return {
"defs": defs,
"cursor": qry.cursor() if len(defs) > 0 else ""
}
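# Hypothetical pagination sketch (process() is a placeholder; iteration stops
# once a page comes back empty):
#
#     page = get_service_interaction_defs(service_user, None, None)
#     while page['defs']:
#         process(page['defs'])
#         page = get_service_interaction_defs(service_user, None, page['cursor'])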
@returns([ServiceMenuDef])
@arguments(service_identity_user=users.User)
def get_service_menu_items(service_identity_user):
svc_user = get_service_user_from_service_identity_user(service_identity_user)
qry = ServiceMenuDef.gql("WHERE ANCESTOR IS :ancestor")
qry.bind(ancestor=parent_key(svc_user))
return generator(qry.run())
@returns([NdbServiceMenuDef])
@arguments(service_identity_user=users.User)
def ndb_get_service_menu_items(service_identity_user):
svc_user = get_service_user_from_service_identity_user(service_identity_user)
return NdbServiceMenuDef.list_by_service(svc_user)
@returns(ServiceMenuDef)
@arguments(service_identity_user=users.User, coords=[(int, long)])
def get_service_menu_item_by_coordinates(service_identity_user, coords):
# type: (users.User, list[int]) -> ServiceMenuDef
service_user = get_service_user_from_service_identity_user(service_identity_user)
return ServiceMenuDef.get_by_key_name("x".join((str(x) for x in coords)), parent=parent_key(service_user))
@cached(1, request=True, lifetime=0, read_cache_in_transaction=True)
@returns(ServiceIdentity)
@arguments(service_identity_user=users.User)
def get_service_identity(service_identity_user):
# type: (users.User) -> ServiceIdentity
return get_service_identity_not_cached(service_identity_user)
@returns([ServiceIdentity])
@arguments(service_identity_users=[users.User])
def get_service_identities_not_cached(service_identity_users):
# type: (List[users.User]) -> List[ServiceIdentity]
# XXX: populate cache
return db.get([ServiceIdentity.keyFromUser(service_identity_user)
for service_identity_user in service_identity_users])
@returns(ServiceIdentity)
@arguments(service_user=users.User)
def get_default_service_identity(service_user):
# type: (users.User) -> ServiceIdentity
return get_service_identity(create_service_identity_user(service_user, ServiceIdentity.DEFAULT))
@returns(ServiceIdentity)
@arguments(service_identity_user=users.User)
def get_service_identity_not_cached(service_identity_user):
# type: (users.User) -> ServiceIdentity
# XXX: populate cache
return ServiceIdentity.get(ServiceIdentity.keyFromUser(service_identity_user))
@returns(ServiceIdentity)
@arguments(service_user=users.User)
def get_default_service_identity_not_cached(service_user):
# type: (users.User) -> ServiceIdentity
# XXX: populate cache
return get_service_identity_not_cached(create_service_identity_user(service_user, ServiceIdentity.DEFAULT))
@returns(db.Query)
@arguments(service_user=users.User, keys_only=bool)
def get_service_identities_query(service_user, keys_only=False):
return ServiceIdentity.all(keys_only=keys_only).ancestor(parent_key(service_user))
@returns([ServiceIdentity])
@arguments(service_user=users.User)
def get_service_identities(service_user):
# type: (users.User) -> list[ServiceIdentity]
qry = get_service_identities_query(service_user)
return generator(qry)
@returns([ServiceIdentity])
@arguments(service_identity_users=[users.User], app_id=unicode, organization_type=int)
def get_service_identities_by_service_identity_users(service_identity_users, app_id=None,
organization_type=ServiceProfile.ORGANIZATION_TYPE_UNSPECIFIED):
# type: (List[users.User], unicode, int) -> List[ServiceIdentity]
# XXX: populate cache
service_identities = db.get([ServiceIdentity.keyFromUser(u) for u in service_identity_users])
if app_id is not None:
service_identities = [si for si in service_identities if app_id in si.appIds]
if organization_type != ServiceProfile.ORGANIZATION_TYPE_UNSPECIFIED:
from rogerthat.dal.profile import get_service_profiles
service_profiles = get_service_profiles([si.service_user for si in service_identities])
for si, sp in zip(service_identities, service_profiles):
if sp.organizationType != organization_type:
service_identities.remove(si)
return service_identities
@returns([ServiceIdentity])
@arguments(service_user=users.User)
def get_child_identities(service_user):
# type: (users.User) -> List[ServiceIdentity]
return [si for si in get_service_identities(service_user) if not si.is_default]
@returns(tuple)
@arguments(service_user=users.User, cursor=unicode)
def get_qr_templates(service_user, cursor):
qry = QRTemplate.gql("WHERE deleted = False AND ANCESTOR is :1", parent_key(service_user))
qry.with_cursor(cursor or None)
templates = qry.fetch(None)
return templates, qry.cursor() if len(templates) else None
@returns(ServiceIdentityStatistic)
@arguments(service_identity_user=users.User)
def get_identity_statistics(service_identity_user):
return ServiceIdentityStatistic.get(ServiceIdentityStatistic.create_key(service_identity_user))
@returns([ServiceIdentityStatistic])
@arguments(service_user=users.User)
def get_all_statistics_by_service_user(service_user):
return generator(ServiceIdentityStatistic.all().ancestor(parent_key(service_user)).run())
@returns(db.GqlQuery)
@arguments()
def get_service_idenities_by_send_email_statistics():
qry = ServiceIdentity.gql("WHERE emailStatistics = True")
return qry
| apache-2.0 |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/imghdr.py | 259 | 3544 | """Recognize image file formats based on their first few bytes."""
__all__ = ["what"]
#-------------------------#
# Recognize image headers #
#-------------------------#
def what(file, h=None):
if h is None:
if isinstance(file, basestring):
f = open(file, 'rb')
h = f.read(32)
else:
location = file.tell()
h = file.read(32)
file.seek(location)
f = None
else:
f = None
try:
for tf in tests:
res = tf(h, f)
if res:
return res
finally:
if f: f.close()
return None
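# Usage sketch (the file name is illustrative; what() only inspects the first
# 32 bytes of the given path or open stream):
#
#     import imghdr
#     kind = imghdr.what('photo.jpg')    # e.g. 'jpeg', 'png', 'gif', or None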
#---------------------------------#
# Subroutines per image file type #
#---------------------------------#
tests = []
def test_jpeg(h, f):
"""JPEG data in JFIF format"""
if h[6:10] == 'JFIF':
return 'jpeg'
tests.append(test_jpeg)
def test_exif(h, f):
"""JPEG data in Exif format"""
if h[6:10] == 'Exif':
return 'jpeg'
tests.append(test_exif)
def test_png(h, f):
if h[:8] == "\211PNG\r\n\032\n":
return 'png'
tests.append(test_png)
def test_gif(h, f):
"""GIF ('87 and '89 variants)"""
if h[:6] in ('GIF87a', 'GIF89a'):
return 'gif'
tests.append(test_gif)
def test_tiff(h, f):
"""TIFF (can be in Motorola or Intel byte order)"""
if h[:2] in ('MM', 'II'):
return 'tiff'
tests.append(test_tiff)
def test_rgb(h, f):
"""SGI image library"""
if h[:2] == '\001\332':
return 'rgb'
tests.append(test_rgb)
def test_pbm(h, f):
"""PBM (portable bitmap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '14' and h[2] in ' \t\n\r':
return 'pbm'
tests.append(test_pbm)
def test_pgm(h, f):
"""PGM (portable graymap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '25' and h[2] in ' \t\n\r':
return 'pgm'
tests.append(test_pgm)
def test_ppm(h, f):
"""PPM (portable pixmap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '36' and h[2] in ' \t\n\r':
return 'ppm'
tests.append(test_ppm)
def test_rast(h, f):
"""Sun raster file"""
if h[:4] == '\x59\xA6\x6A\x95':
return 'rast'
tests.append(test_rast)
def test_xbm(h, f):
"""X bitmap (X10 or X11)"""
s = '#define '
if h[:len(s)] == s:
return 'xbm'
tests.append(test_xbm)
def test_bmp(h, f):
if h[:2] == 'BM':
return 'bmp'
tests.append(test_bmp)
#--------------------#
# Small test program #
#--------------------#
def test():
import sys
recursive = 0
if sys.argv[1:] and sys.argv[1] == '-r':
del sys.argv[1:2]
recursive = 1
try:
if sys.argv[1:]:
testall(sys.argv[1:], recursive, 1)
else:
testall(['.'], recursive, 1)
except KeyboardInterrupt:
sys.stderr.write('\n[Interrupted]\n')
sys.exit(1)
def testall(list, recursive, toplevel):
import sys
import os
for filename in list:
if os.path.isdir(filename):
print filename + '/:',
if recursive or toplevel:
print 'recursing down:'
import glob
names = glob.glob(os.path.join(filename, '*'))
testall(names, recursive, 0)
else:
print '*** directory (use -r) ***'
else:
print filename + ':',
sys.stdout.flush()
try:
print what(filename)
except IOError:
print '*** not found ***'
| gpl-2.0 |
glatard/nipype | nipype/interfaces/afni/tests/test_auto_SkullStrip.py | 9 | 1138 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.afni.preprocess import SkullStrip
def test_SkullStrip_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='-input %s',
copyfile=False,
mandatory=True,
position=1,
),
out_file=dict(argstr='-prefix %s',
name_source='in_file',
name_template='%s_skullstrip',
),
outputtype=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = SkullStrip.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SkullStrip_outputs():
output_map = dict(out_file=dict(),
)
outputs = SkullStrip.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
azatoth/scons | src/engine/SCons/Tool/mwcc.py | 6 | 6796 | """SCons.Tool.mwcc
Tool-specific initialization for the Metrowerks CodeWarrior compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import os.path
import SCons.Util
def set_vars(env):
"""Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars
MWCW_VERSIONS is set to a list of objects representing installed versions
MWCW_VERSION is set to the version object that will be used for building.
MWCW_VERSION can be set to a string during Environment
construction to influence which version is chosen, otherwise
the latest one from MWCW_VERSIONS is used.
Returns true if at least one version is found, false otherwise
"""
desired = env.get('MWCW_VERSION', '')
# return right away if the variables are already set
if isinstance(desired, MWVersion):
return 1
elif desired is None:
return 0
versions = find_versions()
version = None
if desired:
for v in versions:
if str(v) == desired:
version = v
elif versions:
version = versions[-1]
env['MWCW_VERSIONS'] = versions
env['MWCW_VERSION'] = version
if version is None:
return 0
env.PrependENVPath('PATH', version.clpath)
env.PrependENVPath('PATH', version.dllpath)
ENV = env['ENV']
ENV['CWFolder'] = version.path
ENV['LM_LICENSE_FILE'] = version.license
plus = lambda x: '+%s' % x
ENV['MWCIncludes'] = os.pathsep.join(map(plus, version.includes))
ENV['MWLibraries'] = os.pathsep.join(map(plus, version.libs))
return 1
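# Hypothetical SConstruct usage sketch (the version string is an assumption;
# omitting MWCW_VERSION makes set_vars() pick the newest installed version):
#
#     env = Environment(tools=['mwcc'], MWCW_VERSION='9.4')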
def find_versions():
"""Return a list of MWVersion objects representing installed versions"""
versions = []
### This function finds CodeWarrior by reading from the registry on
### Windows. Some other method needs to be implemented for other
### platforms, maybe something that calls env.WhereIs('mwcc')
if SCons.Util.can_read_reg:
try:
HLM = SCons.Util.HKEY_LOCAL_MACHINE
product = 'SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions'
product_key = SCons.Util.RegOpenKeyEx(HLM, product)
i = 0
while True:
name = product + '\\' + SCons.Util.RegEnumKey(product_key, i)
name_key = SCons.Util.RegOpenKeyEx(HLM, name)
try:
version = SCons.Util.RegQueryValueEx(name_key, 'VERSION')
path = SCons.Util.RegQueryValueEx(name_key, 'PATH')
mwv = MWVersion(version[0], path[0], 'Win32-X86')
versions.append(mwv)
except SCons.Util.RegError:
pass
i = i + 1
except SCons.Util.RegError:
pass
return versions
class MWVersion(object):
def __init__(self, version, path, platform):
self.version = version
self.path = path
self.platform = platform
self.clpath = os.path.join(path, 'Other Metrowerks Tools',
'Command Line Tools')
self.dllpath = os.path.join(path, 'Bin')
# The Metrowerks tools don't store any configuration data so they
# are totally dumb when it comes to locating standard headers,
# libraries, and other files, expecting all the information
# to be handed to them in environment variables. The members set
# below control what information scons injects into the environment
### The paths below give a normal build environment in CodeWarrior for
### Windows, other versions of CodeWarrior might need different paths.
msl = os.path.join(path, 'MSL')
support = os.path.join(path, '%s Support' % platform)
self.license = os.path.join(path, 'license.dat')
self.includes = [msl, support]
self.libs = [msl, support]
def __str__(self):
return self.version
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def generate(env):
"""Add Builders and construction variables for the mwcc to an Environment."""
import SCons.Defaults
import SCons.Tool
set_vars(env)
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CXXAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'
env['CC'] = 'mwcc'
env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'
env['CXX'] = 'mwcc'
env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS'
env['SHCC'] = '$CC'
env['SHCCFLAGS'] = '$CCFLAGS'
env['SHCFLAGS'] = '$CFLAGS'
env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = '$CXXFLAGS'
env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS'
env['CFILESUFFIX'] = '.c'
env['CXXFILESUFFIX'] = '.cpp'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
#env['PCH'] = ?
#env['PCHSTOP'] = ?
def exists(env):
return set_vars(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
caseyclements/bokeh | bokeh/server/start.py | 26 | 2775 | from __future__ import absolute_import, print_function
import logging
log = logging.getLogger(__name__)
import os
import sys
from tornado.httpserver import HTTPServer
from tornado import ioloop
from .settings import settings as server_settings
from bokeh import plotting # imports custom objects for plugin
from bokeh import models, protocol # import objects so that we can resolve them
# this just shuts up pyflakes
models, plotting, protocol
from . import services
from .app import bokeh_app, app
from .configure import configure_flask, make_tornado_app, register_blueprint
def doc_prepare():
server_settings.model_backend = {'type' : 'memory'}
configure_flask()
register_blueprint()
return app
http_server = None
def start_redis():
work_dir = getattr(bokeh_app, 'work_dir', os.getcwd())
data_file = getattr(bokeh_app, 'data_file', 'redis.db')
stdout = getattr(bokeh_app, 'stdout', sys.stdout)
stderr = getattr(bokeh_app, 'stderr', sys.stderr)
redis_save = getattr(bokeh_app, 'redis_save', True)
mproc = services.start_redis(pidfilename=os.path.join(work_dir, "bokehpids.json"),
port=bokeh_app.backend.get('redis_port', 6379),
data_dir=work_dir,
data_file=data_file,
stdout=stdout,
stderr=stderr,
save=redis_save)
bokeh_app.redis_proc = mproc
server = None
def make_tornado(config_file=None):
configure_flask(config_file=config_file)
register_blueprint()
tornado_app = make_tornado_app(flask_app=app)
return tornado_app
def start_simple_server(args=None):
global server
configure_flask(config_argparse=args)
if server_settings.model_backend.get('start-redis', False):
start_redis()
register_blueprint()
tornado_app = make_tornado_app(flask_app=app)
if args is not None and args.https:
if args.https_certfile and args.https_keyfile:
server = HTTPServer(tornado_app, ssl_options={"certfile": args.https_certfile, "keyfile": args.https_keyfile})
log.info('HTTPS Enabled')
else:
server = HTTPServer(tornado_app)
log.warning('WARNING: --https-certfile or --https-keyfile are not specified, using http instead')
else:
server = HTTPServer(tornado_app)
server.listen(server_settings.port, server_settings.ip)
ioloop.IOLoop.instance().start()
def stop():
if hasattr(bokeh_app, 'redis_proc'):
bokeh_app.redis_proc.close()
server.stop()
bokehapp = server.request_callback
bokehapp.stop_threads()
ioloop.IOLoop.instance().stop()
ioloop.IOLoop.instance().clear_instance()
| bsd-3-clause |
iancze/JudithExcalibur | scripts/DJ_image_to_FITS.py | 1 | 5644 | #!/usr/bin/env python
# Original script written by Jane Huang, CfA
# Reads in an image.out file from RADMC-3D and creates a new FITS file.
# Ideal for conversion from RADMC output to CASA simobserve, for ALMA proposals
import argparse
parser = argparse.ArgumentParser(description="Convert RADMC-3D image.out into a FITS file. Optionally provide information that will be added to the header of the FITS file.")
parser.add_argument("--image", default="image.out", help="The name of the file created by RADMC-3D.")
parser.add_argument("--fits", default="image.fits", help="The name of the FITS file to which you want to export the image.")
parser.add_argument("--dpc", default=140., type=float, help="At what distance [pc] is this source? Assumes Taurus distance by default.")
parser.add_argument("--RA", default=0, type=float, help="Assign this as the RA to the object.")
parser.add_argument("--DEC", default=0, type=float, help="Assign this as the DEC to the object.")
args = parser.parse_args()
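# Example invocation (all paths and values below are illustrative):
#
#     python DJ_image_to_FITS.py --image image.out --fits disk.fits \
#         --dpc 140 --RA 68.5 --DEC 18.2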
import numpy as np
from astropy.io import fits, ascii
import os
AU = 1.49597870700e13 # [cm]
pc = 3.08567758e18 # [cm]
cc = 2.99792458e10 # [cm s^-1]
dpc = args.dpc # [pc]
RA = args.RA
DEC = args.DEC
# Read in the file from the RADMC-3D format
imagefile = open(args.image)
iformat = imagefile.readline()
im_nx, im_ny = imagefile.readline().split() #number of pixels along x and y axes
im_nx = int(im_nx)
im_ny = int(im_ny)
nlam = int(imagefile.readline())
pixsize_x, pixsize_y = imagefile.readline().split() #pixel sizes in cm for observer at infinity
pixsize_x = float(pixsize_x)
pixsize_y = float(pixsize_y)
# Differential RA and DEC
# ra = ((np.arange(im_nx) + 0.5) - im_nx/2.) * pixsize_x/(AU*dpc)
# dec = ((np.arange(im_ny) + 0.5) - im_ny/2.) * pixsize_y/(AU*dpc)
imvals = ascii.read(args.image, format = 'fast_csv', guess = False, data_start = 4, fast_reader = {'use_fast_converter':True})['1']
lams = imvals[:nlam]
# Convert lams (in microns) into frequencies in Hz
freqs = cc / (lams * 1e-4) # [Hz]
CRVAL3 = freqs[0]
single_chan = False
if len(lams) > 1:
dnu = freqs[1] - freqs[0]
assert np.all(np.abs((np.diff(freqs) - dnu) / dnu) < 0.01), "This conversion script does not support unequal channel widths for multi-channel image cubes."
else:
"Single channel image, assuming bandwidth (CDELT3) is 2GHz"
dnu = 2e9 #[GHz]
single_chan = True
CDELT3 = dnu
pixsize = pixsize_x*pixsize_y/(dpc*pc)**2 #pixel size in steradians
#RADMC gives intensities in erg cm^(-2) s^(-1) Hz^(-1) ster^(-1); convert to Jy/pixel
intensities = np.reshape(imvals[nlam:],[nlam, im_ny, im_nx])* pixsize*10**23
if single_chan:
total_flux = np.sum(intensities)
freq = freqs[0]
print("Total (spatially-integrated) flux density at {:.3e} Hz flux in image: {:.3e} Jy".format(freq, total_flux))
else:
channel_flux = np.sum(intensities, axis=(1,2))
# Make a spectrum plot
# Hz on one side
# km/s on the other
# Use central channel as 0 velocity.
nu0 = np.average(freqs)
vels = cc * (freqs - nu0)/nu0 * 1e-5
integrated_flux = np.trapz(channel_flux, -vels)
print("Total (spatially and velocity) integrated flux is {:.3e} Jy - km/s, integrated over {:.1f} km/s".format(integrated_flux, vels[0] - vels[-1]))
print("See spectrum.png for visual representaion.")
import matplotlib.pyplot as plt
# plt.plot(freqs, channel_flux)
plt.plot(vels, channel_flux)
plt.xlabel(r"$\nu$ [Hz]")
plt.xlabel("v [km/s]")
plt.ylabel(r"$f_\nu$ [Jy]")
plt.savefig("spectrum.png")
# Estimate the total flux in the image. Sum all the pixels in each channel to get the flux density (e.g., Jy, measured at each channel.)
# If there is only one channel, print out a message saying that this is the flux density measured at the frequency of the image.
# Might also want to make a plot of the spectrum itself, in units of Jy (flux density).
# Then, if there is more than one channel, integrate along the frequency dimension to get a measure of the integrated line Jy-km/s, and say what it is and over what velocity range it was integrated.
# Estimate the total flux in each channel, to make a spectrum.
# Convert to float32 to store in FITS?
intensities = intensities.astype('float32')
# Now, export the image to a FITS file
#check back later to make this more general (i.e., deal with the bug in cvel)
hdu = fits.PrimaryHDU(intensities)
header = hdu.header
header['EPOCH'] = 2000.
header['EQUINOX'] = 2000.
# Latitude and Longitude of the pole of the coordinate system.
header['LATPOLE'] = -1.436915713634E+01
header['LONPOLE'] = 180.
# Define the RA coordinate
header['CTYPE1'] = 'RA---SIN'
header['CUNIT1'] = 'DEG'
cdelt1 = -pixsize_x/(pc*dpc)*180/np.pi
header['CDELT1'] = cdelt1
# Pixel coordinates of the reference point. For example, if the image is 256 pixels wide, then this
# would refer to the center of the 129th pixel (0.5*256 + 1).
if im_nx % 2 == 0:
header['CRPIX1'] = int(0.5*im_nx + 1)
else:
header['CRPIX1'] = int(0.5*im_nx+0.5)
header['CRVAL1'] = RA
# Define the DEC coordinate
header['CTYPE2'] = 'DEC--SIN'
header['CUNIT2'] = 'DEG'
header['CDELT2'] = -1*cdelt1 #assumes square image
if im_ny % 2 == 0:
header['CRPIX2'] = int(0.5*im_ny + 1)
else:
header['CRPIX2'] = int(0.5*im_ny+0.5)
header['CRVAL2'] = DEC
# Define the frequency coordinate
header['CTYPE3'] = 'FREQ'
header['CUNIT3'] = 'Hz'
header['CRPIX3'] = 1.
header['CDELT3'] = CDELT3
header['CRVAL3'] = CRVAL3
header['SPECSYS'] = 'LSRK'
header['VELREF'] = 257
header['BSCALE'] = 1.
header['BZERO'] = 0.
header['BUNIT'] = 'JY/PIXEL'
header['BTYPE']='Intensity'
hdu.writeto(args.fits, overwrite = True)
| mit |
arnaudsj/suds | suds/reader.py | 3 | 4381 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Contains xml document reader classes.
"""
from suds.sax.parser import Parser
from suds.transport import Request
from suds.store import DocumentStore
from logging import getLogger
log = getLogger(__name__)
class ObjectId(object):
def __init__(self, name, suffix):
self.name = name
self.suffix = suffix
class DocumentReader:
"""
The XML document reader provides an integration
between the SAX L{Parser} and the document cache.
@cvar suffix: The cache file suffix.
@type suffix: str
@ivar options: An options object.
@type options: I{Options}
"""
suffix = 'pxd'
def __init__(self, options):
"""
@param options: An options object.
@type options: I{Options}
"""
self.options = options
def open(self, url):
"""
Open an XML document at the specified I{url}.
First, the document attempted to be retrieved from
the I{object cache}. If not found, it is downloaded and
parsed using the SAX parser. The result is added to the
cache for the next open().
@param url: A document url.
@type url: str.
@return: The specified XML document.
@rtype: I{Document}
"""
id = ObjectId(url, self.suffix)
cache = self.options.cache
d = cache.get(id)
if d is None:
d = self.download(url)
cache.put(id, d)
return d
def download(self, url):
"""
Download the document.
@param url: A document url.
@type url: str.
@return: A file pointer to the document.
@rtype: file-like
"""
store = DocumentStore()
fp = store.open(url)
if fp is None:
fp = self.options.transport.open(Request(url))
sax = Parser()
return sax.parse(file=fp)
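# Hypothetical usage sketch (the options object and URL are assumptions; a
# second open() of the same URL is served from options.cache):
#
#     reader = DocumentReader(options)
#     document = reader.open('http://example.com/schema.xsd')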
class DefinitionsReader:
"""
The WSDL definitions reader provides an integration
between the Definitions and the object cache.
@cvar suffix: The cache file suffix.
@type suffix: str
@ivar options: An options object.
@type options: I{Options}
@ivar fn: A factory function (constructor) used to
create the object not found in the cache.
@type fn: I{Constructor}
"""
suffix = 'pw'
def __init__(self, options, fn):
"""
@param options: An options object.
@type options: I{Options}
@param fn: A factory function (constructor) used to
create the object not found in the cache.
@type fn: I{Constructor}
"""
self.options = options
self.fn = fn
def open(self, url):
"""
Open a WSDL at the specified I{url}.
First, an attempt is made to retrieve the WSDL from
the I{object cache}. After it is unpickled from the cache, the
I{options} attribute is restored.
If not found, it is downloaded and instantiated using the
I{fn} constructor and added to the cache for the next open().
@param url: A WSDL url.
@type url: str.
@return: The WSDL object.
@rtype: I{Definitions}
"""
id = ObjectId(url, self.suffix)
cache = self.options.cache
d = cache.get(id)
if d is None:
d = self.fn(url, self.options)
cache.put(id, d)
else:
d.options = self.options
for imp in d.imports:
imp.imported.options = self.options
return d
| lgpl-3.0 |
gluke77/rally | rally/cli/commands/plugin.py | 6 | 4031 | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from rally.cli import cliutils
from rally.common.plugin import plugin
from rally.common import utils
from rally import plugins
class PluginCommands(object):
"""Set of commands that allow you to manage Rally plugins."""
@staticmethod
def _print_plugins_list(plugin_list):
rows = [utils.Struct(name=f.get_name(),
namespace=f.get_namespace(),
title=f.get_info()["title"])
for f in plugin_list]
cliutils.print_list(rows, fields=["name", "namespace", "title"])
@cliutils.args("--name", dest="name", type=str,
help="Plugin name.")
@cliutils.args("--namespace", dest="namespace", type=str,
help="Plugin namespace.")
@plugins.ensure_plugins_are_loaded
def show(self, name, namespace=None):
"""Show detailed information about a Rally plugin."""
name_lw = name.lower()
all_plugins = plugin.Plugin.get_all(namespace=namespace)
found = [p for p in all_plugins if name_lw in p.get_name().lower()]
exact_match = [p for p in found if name_lw == p.get_name().lower()]
if not found:
if namespace:
print(
"There is no plugin: %(name)s in %(namespace)s namespace"
% {"name": name, "namespace": namespace}
)
else:
print("There is no plugin: %s" % name)
elif len(found) == 1 or exact_match:
plugin_ = found[0] if len(found) == 1 else exact_match[0]
plugin_info = plugin_.get_info()
print(cliutils.make_header(plugin_info["title"]))
print("NAME\n\t%s" % plugin_info["name"])
print("NAMESPACE\n\t%s" % plugin_info["namespace"])
print("MODULE\n\t%s" % plugin_info["module"])
if plugin_info["description"]:
print("DESCRIPTION\n\t", end="")
print("\n\t".join(plugin_info["description"].split("\n")))
if plugin_info["parameters"]:
print("PARAMETERS")
rows = [utils.Struct(name=p["name"],
description="%s\n" % p["doc"])
for p in plugin_info["parameters"]]
cliutils.print_list(rows, fields=["name", "description"])
else:
print("Multiple plugins found:")
self._print_plugins_list(found)
@cliutils.args("--name", dest="name", type=str,
help="List only plugins that match the given name.")
@cliutils.args(
"--namespace", dest="namespace", type=str,
help="List only plugins that are in the specified namespace.")
@plugins.ensure_plugins_are_loaded
def list(self, name=None, namespace=None):
"""List all Rally plugins that match name and namespace."""
all_plugins = plugin.Plugin.get_all(namespace=namespace)
matched = all_plugins
if name:
name_lw = name.lower()
matched = [p for p in all_plugins
if name_lw in p.get_name().lower()]
if not all_plugins:
print("There is no plugin namespace: %s" % namespace)
elif not matched:
print("There is no plugin: %s" % name)
else:
self._print_plugins_list(matched)
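# Typical CLI invocations that map onto the methods above (the exact command
# wiring is an assumption; the flag names match the cliutils.args decorators):
#
#     rally plugin show --name <plugin_name>
#     rally plugin list --namespace <namespace>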
| apache-2.0 |
wzbozon/statsmodels | statsmodels/sandbox/contrast_old.py | 34 | 4686 | import copy
import numpy as np
from numpy.linalg import pinv
from statsmodels.sandbox import utils_old as utils
class ContrastResults(object):
"""
Results from looking at a particular contrast of coefficients in
a parametric model. The class does nothing, it is a container
for the results from T and F contrasts.
"""
def __init__(self, t=None, F=None, sd=None, effect=None, df_denom=None,
df_num=None):
if F is not None:
self.F = F
self.df_denom = df_denom
self.df_num = df_num
else:
self.t = t
self.sd = sd
self.effect = effect
self.df_denom = df_denom
def __array__(self):
if hasattr(self, "F"):
return self.F
else:
return self.t
def __str__(self):
if hasattr(self, 'F'):
return '<F contrast: F=%s, df_denom=%d, df_num=%d>' % \
(repr(self.F), self.df_denom, self.df_num)
else:
return '<T contrast: effect=%s, sd=%s, t=%s, df_denom=%d>' % \
(repr(self.effect), repr(self.sd), repr(self.t), self.df_denom)
class Contrast(object):
"""
This class is used to construct contrast matrices in regression models.
They are specified by a (term, formula) pair.
The term, T, is a linear combination of columns of the design
matrix D=formula(). The matrix attribute is
a contrast matrix C so that
colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))
where pinv(D) is the generalized inverse of D. Further, the matrix
Tnew = dot(C, D)
is full rank. The rank attribute is the rank of
dot(D, dot(pinv(D), T))
In a regression model, the contrast tests that E(dot(Tnew, Y)) = 0
for each column of Tnew.
"""
def __init__(self, term, formula, name=''):
self.term = term
self.formula = formula
if name == '':
self.name = str(term)
else:
self.name = name
def __str__(self):
return '<contrast:%s>' % \
repr({'term':str(self.term), 'formula':str(self.formula)})
def compute_matrix(self, *args, **kw):
"""
Construct a contrast matrix C so that
colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))
where pinv(D) is the generalized inverse of D=self.D=self.formula().
If the design, self.D is already set,
then evaldesign can be set to False.
"""
t = copy.copy(self.term)
t.namespace = self.formula.namespace
T = np.transpose(np.array(t(*args, **kw)))
if T.ndim == 1:
T.shape = (T.shape[0], 1)
self.T = utils.clean0(T)
self.D = self.formula.design(*args, **kw)
self._matrix = contrastfromcols(self.T, self.D)
try:
self.rank = self.matrix.shape[1]
except:
self.rank = 1
def _get_matrix(self):
"""
This will fail if the formula needs arguments to construct
the design.
"""
if not hasattr(self, "_matrix"):
self.compute_matrix()
return self._matrix
matrix = property(_get_matrix)
def contrastfromcols(L, D, pseudo=None):
"""
From an n x p design matrix D and a matrix L, tries
to determine a p x q contrast matrix C which
determines a contrast of full rank, i.e. the
n x q matrix
dot(transpose(C), pinv(D))
is full rank.
L must satisfy either L.shape[0] == n or L.shape[1] == p.
If L.shape[0] == n, then L is thought of as representing
columns in the column space of D.
If L.shape[1] == p, then L is thought of as what is known
as a contrast matrix. In this case, this function returns an estimable
contrast corresponding to the dot(D, L.T)
Note that this always produces a contrast, though not necessarily one
with the intended properties, because q is non-zero unless
L is identically 0. That is, it produces a contrast that spans
the column space of L (after projection onto the column space of D).
"""
L = np.asarray(L)
D = np.asarray(D)
n, p = D.shape
if L.shape[0] != n and L.shape[1] != p:
raise ValueError('shape of L and D mismatched')
if pseudo is None:
pseudo = pinv(D)
if L.shape[0] == n:
C = np.dot(pseudo, L).T
else:
C = L
C = np.dot(pseudo, np.dot(D, C.T)).T
Lp = np.dot(D, C.T)
if len(Lp.shape) == 1:
Lp.shape = (n, 1)
if utils.rank(Lp) != Lp.shape[1]:
Lp = utils.fullrank(Lp)
C = np.dot(pseudo, Lp).T
return np.squeeze(C)
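# Minimal usage sketch (synthetic numbers, illustrative only): build an
# estimable contrast comparing the first two columns of a random design.
#
#     import numpy as np
#     D = np.random.standard_normal((20, 4))    # n x p design matrix
#     L = np.array([[1., -1., 0., 0.]])          # 1 x p contrast specification
#     C = contrastfromcols(L, D)                 # length-p vector (q = 1 here)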
| bsd-3-clause |
smccaffrey/blackboard_automation | tests/client_builds/PHY132_Fall2017/dueDates_V1.py | 2 | 3882 | import selenium
import getpass
import time
import sys
import logging as log
import pandas as pd
from selenium import webdriver as wbd
from selenium.webdriver.common.by import By
sys.path.append('/Users/smccaffrey/Desktop/')
#from automation import test_options as prelabs
from automation import assignment_options as lab_reports
from automation import SideBar
from automation import authorization
from automation import SectionSelector
from automation import EditTests as prelabs
### Creates the browser instance in which all operations take place ###
#driver = wbd.Chrome('/Users/smccaffrey/Desktop/BlackboardAssistant/lib/chromedriver2_26')
driver = wbd.Chrome()
filename = '/Users/smccaffrey/Desktop/BlackboardAssistant/tests/client_builds/PHY132_Fall2017/PHY132_Fall2017_v2.csv'
p = 'PHY 132: University Physics Lab II (2017 Fall)-'
URL = 'https://myasucourses.asu.edu/webapps/portal/execute/tabs/tabAction?tab_tab_group_id=_1_1'
### Parsers excel workbook (must be .csv file) ###
def parser(filename):
df1 = pd.read_csv(filename, dtype=str, delimiter=',', header=None)
return df1
### Update Prelabs information ###
def updater(d, p, URL, arr, module1, module2, dryrun=True):
i = 1
for i in range(1, len(arr[0])):
print("Choosing a section")
SectionSelector(d).find_section(module = p, section = arr[0][i], wait = 5)
#d.find_element_by_link_text(p + str(arr[0][i])).click()
SideBar(d).navigate_to(element = 'PRELABS', wait = 5)
#d.find_element_by_link_text(module1).click()
#time.sleep(5)
n = 1
for n in range (1, 11):
print("loop is starting: now select assignmentSelector")
#prelabs.assignmentSelector(driver = d, module = module1, test = arr[n+2][0], index = n)
prelabs(d).assignmentSelector(element = arr[n+2][0], wait = 5)
print("Editing SECTION: " + str(arr[0][i]) + " " + arr[n+2][0])
prelabs(d).editTestOptions(wait = 3)
prelabs(d).startRestrictCheck(state = False)
prelabs(d).endRestrictCheck(state = False)
prelabs(d).dueDate(date = arr[n+2][i])
prelabs(d).dueDateTime(time = arr[1][i])
prelabs(d).dueDateCheck(state = True)
"""
for x in range(0, 2):
prelabs(d).dueDateCheck(state = True)
for x in range(0, 2):
prelabs(d).dueDateCheck(state = True)
for x in range(0, 1):
prelabs(d).lateSubmissionCheck(state = True)
"""
#pause = raw_input("Press <ENTER> to continue: ")
if not dryrun:
prelabs(d).submit()
prelabs(d).cancel()
time.sleep(7)
d.find_element_by_link_text(module2).click()
time.sleep(3)
for n in range(1, 10):
lab_reports.assignmentSelector(driver = d, module = module2, assignment = arr[n+12][0])
print("Editing SECTION: " + str(arr[0][i]) + " " + arr[n+12][0])
time.sleep(5)
lab_reports.edit_test_options(d)
time.sleep(3)
lab_reports.start_restrict(d, False)
lab_reports.end_restrict(d, False)
lab_reports._dueDate(d, True)
lab_reports.dp_dueDate_date(d, arr[n+12][i])
lab_reports.tp_dueDate_time(d, arr[2][i])
#pause = raw_input("Press <ENTER> to continue: ")
lab_reports.cancel(d)
d.get(URL)
time.sleep(4)
### test function ###
def test_func(d, filename, dryrun=False):
parser(filename)
if not dryrun:
authorization.login(driver = d, url = URL, wait = 10)
authorization.dual_factor(driver = d, wait = 15)
updater(d, p, URL, parser(filename), module1 = 'PRELABS', module2 = 'Submit Lab Reports')
if __name__ == '__main__':
test_func(driver, filename)
| apache-2.0 |
ElricleNecro/LISA | LISA/gui/utils/matrices/utils.py | 2 | 3840 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import LISA.Matrice as m
import math as mm
__all__ = ["Perspective", "Orthographic"]
class Perspective(m.Matrix):
def __init__(self, *args, **kwargs):
super(Perspective, self).__init__(*args, **kwargs)
self._angle = 60.0
self._ratio = 16 / 9
self._minimal = 0.000001
self._maximal = 10000000.0
self._setf()
self[:] = m.Perspective(
self._angle,
self._ratio,
self._minimal,
self._maximal,
)[:]
def _setf(self):
self._f = 1. / mm.tan(self._angle / 2.0 * mm.pi / 180.)
@property
def angle(self):
return self._angle
@angle.setter
def angle(self, angle):
self._angle = angle
self._setf()
self[0, 0] = self._f / self._ratio
self[1, 1] = self._f
@property
def ratio(self):
return self._ratio
@ratio.setter
def ratio(self, ratio):
self._ratio = ratio
self[0, 0] = self._f / self._ratio
@property
def minimal(self):
return self._minimal
@minimal.setter
def minimal(self, minimal):
self._minimal = minimal
self[2, 2] = (
self._minimal + self._maximal
) / (self._minimal - self._maximal)
self[2, 3] = 2. * self._minimal * self._maximal / (
self._minimal - self._maximal
)
@property
def maximal(self):
return self._maximal
@maximal.setter
def maximal(self, maximal):
self._maximal = maximal
self[2, 2] = (
self._minimal + self._maximal
) / (self._minimal - self._maximal)
self[2, 3] = 2. * self._minimal * self._maximal / (
self._minimal - self._maximal
)
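# Hypothetical usage sketch (construction arguments follow whatever
# LISA.Matrice.Matrix expects; only the property updates are shown):
#
#     projection.angle = 45.0        # recomputes the [0, 0] and [1, 1] entries
#     projection.ratio = 4.0 / 3.0   # recomputes the [0, 0] entry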
class Orthographic(m.Matrix):
def __init__(self, *args, **kwargs):
super(Orthographic, self).__init__(*args, **kwargs)
self._far = 10.
self._near = -10
self._right = 800.
self._left = 0.
self._top = 0.
self._bottom = 800.
self[:] = m.Orthographic(
self._right,
self._left,
self._top,
self._bottom,
self._near,
self._far,
)[:]
@property
def right(self):
return self._right
@right.setter
def right(self, right):
self._right = right
tmp = self._right - self._left
self[0, 0] = 2. / tmp
self[0, 3] = - (self._right + self._left) / tmp
@property
def left(self):
return self._left
@left.setter
def left(self, left):
self._left = left
tmp = self._right - self._left
self[0, 0] = 2. / tmp
self[0, 3] = - (self._right + self._left) / tmp
@property
def top(self):
return self._top
@top.setter
def top(self, top):
self._top = top
tmp = self._top - self._bottom
self[1, 1] = 2. / tmp
self[1, 3] = - (self._top + self._bottom) / tmp
@property
def bottom(self):
return self._bottom
@bottom.setter
def bottom(self, bottom):
self._bottom = bottom
tmp = self._top - self._bottom
self[1, 1] = 2. / tmp
self[1, 3] = - (self._top + self._bottom) / tmp
@property
def far(self):
return self._far
@far.setter
def far(self, far):
self._far = far
tmp = self._far - self._near
self[2, 2] = - 2. / tmp
self[2, 3] = (self._far + self._near) / tmp
@property
def near(self):
return self._near
@near.setter
def near(self, near):
self._near = near
tmp = self._far - self._near
self[2, 2] = - 2. / tmp
self[2, 3] = (self._far + self._near) / tmp
# vim: set tw=79 :
| lgpl-3.0 |
Pluto-tv/chromium-crosswalk | third_party/closure_linter/closure_linter/aliaspass_test.py | 84 | 5013 | #!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the aliaspass module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('[email protected] (Nathan Naze)')
import unittest as googletest
from closure_linter import aliaspass
from closure_linter import errors
from closure_linter import testutil
from closure_linter.common import erroraccumulator
def _GetTokenByLineAndString(start_token, string, line_number):
for token in start_token:
if token.line_number == line_number and token.string == string:
return token
class AliasPassTest(googletest.TestCase):
def testInvalidGoogScopeCall(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCOPE_SCRIPT)
error_accumulator = erroraccumulator.ErrorAccumulator()
alias_pass = aliaspass.AliasPass(
error_handler=error_accumulator)
alias_pass.Process(start_token)
alias_errors = error_accumulator.GetErrors()
self.assertEquals(1, len(alias_errors))
alias_error = alias_errors[0]
self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, alias_error.code)
self.assertEquals('goog.scope', alias_error.token.string)
def testAliasedIdentifiers(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
alias_pass.Process(start_token)
alias_token = _GetTokenByLineAndString(start_token, 'Event', 4)
self.assertTrue(alias_token.metadata.is_alias_definition)
my_class_token = _GetTokenByLineAndString(start_token, 'myClass', 9)
self.assertIsNone(my_class_token.metadata.aliased_symbol)
component_token = _GetTokenByLineAndString(start_token, 'Component', 17)
self.assertEquals('goog.ui.Component',
component_token.metadata.aliased_symbol)
event_token = _GetTokenByLineAndString(start_token, 'Event.Something', 17)
self.assertEquals('goog.events.Event.Something',
event_token.metadata.aliased_symbol)
non_closurized_token = _GetTokenByLineAndString(
start_token, 'NonClosurizedClass', 18)
self.assertIsNone(non_closurized_token.metadata.aliased_symbol)
long_start_token = _GetTokenByLineAndString(start_token, 'Event.', 21)
self.assertEquals('goog.events.Event.MultilineIdentifier.someMethod',
long_start_token.metadata.aliased_symbol)
def testMultipleGoogScopeCalls(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(
_TEST_MULTIPLE_SCOPE_SCRIPT)
error_accumulator = erroraccumulator.ErrorAccumulator()
alias_pass = aliaspass.AliasPass(
set(['goog', 'myproject']),
error_handler=error_accumulator)
alias_pass.Process(start_token)
alias_errors = error_accumulator.GetErrors()
self.assertEquals(3, len(alias_errors))
error = alias_errors[0]
self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, error.code)
self.assertEquals(7, error.token.line_number)
error = alias_errors[1]
self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
self.assertEquals(7, error.token.line_number)
error = alias_errors[2]
self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
self.assertEquals(11, error.token.line_number)
_TEST_ALIAS_SCRIPT = """
goog.scope(function() {
var events = goog.events; // scope alias
var Event = events.
Event; // nested multiline scope alias
// This should not be registered as an aliased identifier because
// it appears before the alias.
var myClass = new MyClass();
var Component = goog.ui.Component; // scope alias
var MyClass = myproject.foo.MyClass; // scope alias
// Scope alias of non-Closurized namespace.
var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
var component = new Component(Event.Something);
var nonClosurized = NonClosurizedClass();
// A created namespace with a really long identifier.
Event.
MultilineIdentifier.
someMethod = function() {};
});
"""
_TEST_SCOPE_SCRIPT = """
function foo () {
// This goog.scope call is invalid.
goog.scope(function() {
});
}
"""
_TEST_MULTIPLE_SCOPE_SCRIPT = """
goog.scope(function() {
// do nothing
});
function foo() {
var test = goog.scope; // We should not see goog.scope mentioned.
}
// This goog.scope invalid. There can be only one.
goog.scope(function() {
});
"""
if __name__ == '__main__':
googletest.main()
| bsd-3-clause |
XiaosongWei/chromium-crosswalk | chrome/installer/tools/shortcut_properties.py | 28 | 2050 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a Windows shortcut's property bag to stdout.
This is required to confirm correctness of properties that aren't readily
available in Windows UI.
"""
import optparse
from pywintypes import IID
import sys
from win32com.propsys import propsys
from win32com.propsys import pscon
def PrintShortcutProperties(shortcut_path, dump_all):
properties = propsys.SHGetPropertyStoreFromParsingName(shortcut_path)
print 'Known properties (--dump-all for more):'
app_id = properties.GetValue(pscon.PKEY_AppUserModel_ID).GetValue()
print '\tAppUserModelId => "%s"' % app_id
# Hard code PKEY_AppUserModel_IsDualMode as pscon doesn't support it.
PKEY_AppUserModel_IsDualMode = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'),
11)
dual_mode = properties.GetValue(PKEY_AppUserModel_IsDualMode).GetValue()
print '\tDual Mode => "%s"' % dual_mode
# Dump all other properties with their raw ID if requested, add them above
# over time as we explicitly care about more properties, see propkey.h or
# pscon.py for a reference of existing PKEYs' meaning.
if dump_all:
print '\nOther properties:'
for i in range(0, properties.GetCount()):
property_key = properties.GetAt(i)
property_value = properties.GetValue(property_key).GetValue()
print '\t%s => "%s"' % (property_key, property_value)
def main():
usage = 'usage: %prog [options] "C:\\Path\\To\\My Shortcut.lnk"'
parser = optparse.OptionParser(usage,
description="Dumps a shortcut's properties.")
parser.add_option('-a', '--dump-all', action='store_true', dest='dump_all',
default=False)
options, args = parser.parse_args()
if len(args) != 1:
parser.error('incorrect number of arguments')
PrintShortcutProperties(args[0], options.dump_all)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
evelynmitchell/pdq | python/pdq.py | 1 | 17664 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.11
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_pdq', [dirname(__file__)])
except ImportError:
import _pdq
return _pdq
if fp is not None:
try:
_mod = imp.load_module('_pdq', fp, pathname, description)
finally:
fp.close()
return _mod
_pdq = swig_import_helper()
del swig_import_helper
else:
import _pdq
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
TRUE = _pdq.TRUE
FALSE = _pdq.FALSE
MAXNODES = _pdq.MAXNODES
MAXBUF = _pdq.MAXBUF
MAXSTREAMS = _pdq.MAXSTREAMS
MAXCHARS = _pdq.MAXCHARS
VOID = _pdq.VOID
OPEN = _pdq.OPEN
CLOSED = _pdq.CLOSED
MEM = _pdq.MEM
CEN = _pdq.CEN
DLY = _pdq.DLY
MSQ = _pdq.MSQ
ISRV = _pdq.ISRV
FCFS = _pdq.FCFS
PSHR = _pdq.PSHR
LCFS = _pdq.LCFS
TERM = _pdq.TERM
TRANS = _pdq.TRANS
BATCH = _pdq.BATCH
EXACT = _pdq.EXACT
APPROX = _pdq.APPROX
CANON = _pdq.CANON
VISITS = _pdq.VISITS
DEMAND = _pdq.DEMAND
PDQ_SP = _pdq.PDQ_SP
PDQ_MP = _pdq.PDQ_MP
TOL = _pdq.TOL
class SYSTAT_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SYSTAT_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SYSTAT_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["response"] = _pdq.SYSTAT_TYPE_response_set
__swig_getmethods__["response"] = _pdq.SYSTAT_TYPE_response_get
if _newclass:response = _swig_property(_pdq.SYSTAT_TYPE_response_get, _pdq.SYSTAT_TYPE_response_set)
__swig_setmethods__["thruput"] = _pdq.SYSTAT_TYPE_thruput_set
__swig_getmethods__["thruput"] = _pdq.SYSTAT_TYPE_thruput_get
if _newclass:thruput = _swig_property(_pdq.SYSTAT_TYPE_thruput_get, _pdq.SYSTAT_TYPE_thruput_set)
__swig_setmethods__["residency"] = _pdq.SYSTAT_TYPE_residency_set
__swig_getmethods__["residency"] = _pdq.SYSTAT_TYPE_residency_get
if _newclass:residency = _swig_property(_pdq.SYSTAT_TYPE_residency_get, _pdq.SYSTAT_TYPE_residency_set)
__swig_setmethods__["physmem"] = _pdq.SYSTAT_TYPE_physmem_set
__swig_getmethods__["physmem"] = _pdq.SYSTAT_TYPE_physmem_get
if _newclass:physmem = _swig_property(_pdq.SYSTAT_TYPE_physmem_get, _pdq.SYSTAT_TYPE_physmem_set)
__swig_setmethods__["highwater"] = _pdq.SYSTAT_TYPE_highwater_set
__swig_getmethods__["highwater"] = _pdq.SYSTAT_TYPE_highwater_get
if _newclass:highwater = _swig_property(_pdq.SYSTAT_TYPE_highwater_get, _pdq.SYSTAT_TYPE_highwater_set)
__swig_setmethods__["malloc"] = _pdq.SYSTAT_TYPE_malloc_set
__swig_getmethods__["malloc"] = _pdq.SYSTAT_TYPE_malloc_get
if _newclass:malloc = _swig_property(_pdq.SYSTAT_TYPE_malloc_get, _pdq.SYSTAT_TYPE_malloc_set)
__swig_setmethods__["mpl"] = _pdq.SYSTAT_TYPE_mpl_set
__swig_getmethods__["mpl"] = _pdq.SYSTAT_TYPE_mpl_get
if _newclass:mpl = _swig_property(_pdq.SYSTAT_TYPE_mpl_get, _pdq.SYSTAT_TYPE_mpl_set)
__swig_setmethods__["maxN"] = _pdq.SYSTAT_TYPE_maxN_set
__swig_getmethods__["maxN"] = _pdq.SYSTAT_TYPE_maxN_get
if _newclass:maxN = _swig_property(_pdq.SYSTAT_TYPE_maxN_get, _pdq.SYSTAT_TYPE_maxN_set)
__swig_setmethods__["maxTP"] = _pdq.SYSTAT_TYPE_maxTP_set
__swig_getmethods__["maxTP"] = _pdq.SYSTAT_TYPE_maxTP_get
if _newclass:maxTP = _swig_property(_pdq.SYSTAT_TYPE_maxTP_get, _pdq.SYSTAT_TYPE_maxTP_set)
__swig_setmethods__["minRT"] = _pdq.SYSTAT_TYPE_minRT_set
__swig_getmethods__["minRT"] = _pdq.SYSTAT_TYPE_minRT_get
if _newclass:minRT = _swig_property(_pdq.SYSTAT_TYPE_minRT_get, _pdq.SYSTAT_TYPE_minRT_set)
def __init__(self):
this = _pdq.new_SYSTAT_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_SYSTAT_TYPE
__del__ = lambda self : None;
SYSTAT_TYPE_swigregister = _pdq.SYSTAT_TYPE_swigregister
SYSTAT_TYPE_swigregister(SYSTAT_TYPE)
cvar = _pdq.cvar
class TERMINAL_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, TERMINAL_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, TERMINAL_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["name"] = _pdq.TERMINAL_TYPE_name_set
__swig_getmethods__["name"] = _pdq.TERMINAL_TYPE_name_get
if _newclass:name = _swig_property(_pdq.TERMINAL_TYPE_name_get, _pdq.TERMINAL_TYPE_name_set)
__swig_setmethods__["pop"] = _pdq.TERMINAL_TYPE_pop_set
__swig_getmethods__["pop"] = _pdq.TERMINAL_TYPE_pop_get
if _newclass:pop = _swig_property(_pdq.TERMINAL_TYPE_pop_get, _pdq.TERMINAL_TYPE_pop_set)
__swig_setmethods__["think"] = _pdq.TERMINAL_TYPE_think_set
__swig_getmethods__["think"] = _pdq.TERMINAL_TYPE_think_get
if _newclass:think = _swig_property(_pdq.TERMINAL_TYPE_think_get, _pdq.TERMINAL_TYPE_think_set)
__swig_setmethods__["sys"] = _pdq.TERMINAL_TYPE_sys_set
__swig_getmethods__["sys"] = _pdq.TERMINAL_TYPE_sys_get
if _newclass:sys = _swig_property(_pdq.TERMINAL_TYPE_sys_get, _pdq.TERMINAL_TYPE_sys_set)
def __init__(self):
this = _pdq.new_TERMINAL_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_TERMINAL_TYPE
__del__ = lambda self : None;
TERMINAL_TYPE_swigregister = _pdq.TERMINAL_TYPE_swigregister
TERMINAL_TYPE_swigregister(TERMINAL_TYPE)
class BATCH_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, BATCH_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, BATCH_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["name"] = _pdq.BATCH_TYPE_name_set
__swig_getmethods__["name"] = _pdq.BATCH_TYPE_name_get
if _newclass:name = _swig_property(_pdq.BATCH_TYPE_name_get, _pdq.BATCH_TYPE_name_set)
__swig_setmethods__["pop"] = _pdq.BATCH_TYPE_pop_set
__swig_getmethods__["pop"] = _pdq.BATCH_TYPE_pop_get
if _newclass:pop = _swig_property(_pdq.BATCH_TYPE_pop_get, _pdq.BATCH_TYPE_pop_set)
__swig_setmethods__["sys"] = _pdq.BATCH_TYPE_sys_set
__swig_getmethods__["sys"] = _pdq.BATCH_TYPE_sys_get
if _newclass:sys = _swig_property(_pdq.BATCH_TYPE_sys_get, _pdq.BATCH_TYPE_sys_set)
def __init__(self):
this = _pdq.new_BATCH_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_BATCH_TYPE
__del__ = lambda self : None;
BATCH_TYPE_swigregister = _pdq.BATCH_TYPE_swigregister
BATCH_TYPE_swigregister(BATCH_TYPE)
class TRANSACTION_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, TRANSACTION_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, TRANSACTION_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["name"] = _pdq.TRANSACTION_TYPE_name_set
__swig_getmethods__["name"] = _pdq.TRANSACTION_TYPE_name_get
if _newclass:name = _swig_property(_pdq.TRANSACTION_TYPE_name_get, _pdq.TRANSACTION_TYPE_name_set)
__swig_setmethods__["arrival_rate"] = _pdq.TRANSACTION_TYPE_arrival_rate_set
__swig_getmethods__["arrival_rate"] = _pdq.TRANSACTION_TYPE_arrival_rate_get
if _newclass:arrival_rate = _swig_property(_pdq.TRANSACTION_TYPE_arrival_rate_get, _pdq.TRANSACTION_TYPE_arrival_rate_set)
__swig_setmethods__["saturation_rate"] = _pdq.TRANSACTION_TYPE_saturation_rate_set
__swig_getmethods__["saturation_rate"] = _pdq.TRANSACTION_TYPE_saturation_rate_get
if _newclass:saturation_rate = _swig_property(_pdq.TRANSACTION_TYPE_saturation_rate_get, _pdq.TRANSACTION_TYPE_saturation_rate_set)
__swig_setmethods__["sys"] = _pdq.TRANSACTION_TYPE_sys_set
__swig_getmethods__["sys"] = _pdq.TRANSACTION_TYPE_sys_get
if _newclass:sys = _swig_property(_pdq.TRANSACTION_TYPE_sys_get, _pdq.TRANSACTION_TYPE_sys_set)
def __init__(self):
this = _pdq.new_TRANSACTION_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_TRANSACTION_TYPE
__del__ = lambda self : None;
TRANSACTION_TYPE_swigregister = _pdq.TRANSACTION_TYPE_swigregister
TRANSACTION_TYPE_swigregister(TRANSACTION_TYPE)
class JOB_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, JOB_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, JOB_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["should_be_class"] = _pdq.JOB_TYPE_should_be_class_set
__swig_getmethods__["should_be_class"] = _pdq.JOB_TYPE_should_be_class_get
if _newclass:should_be_class = _swig_property(_pdq.JOB_TYPE_should_be_class_get, _pdq.JOB_TYPE_should_be_class_set)
__swig_setmethods__["network"] = _pdq.JOB_TYPE_network_set
__swig_getmethods__["network"] = _pdq.JOB_TYPE_network_get
if _newclass:network = _swig_property(_pdq.JOB_TYPE_network_get, _pdq.JOB_TYPE_network_set)
__swig_setmethods__["term"] = _pdq.JOB_TYPE_term_set
__swig_getmethods__["term"] = _pdq.JOB_TYPE_term_get
if _newclass:term = _swig_property(_pdq.JOB_TYPE_term_get, _pdq.JOB_TYPE_term_set)
__swig_setmethods__["batch"] = _pdq.JOB_TYPE_batch_set
__swig_getmethods__["batch"] = _pdq.JOB_TYPE_batch_get
if _newclass:batch = _swig_property(_pdq.JOB_TYPE_batch_get, _pdq.JOB_TYPE_batch_set)
__swig_setmethods__["trans"] = _pdq.JOB_TYPE_trans_set
__swig_getmethods__["trans"] = _pdq.JOB_TYPE_trans_get
if _newclass:trans = _swig_property(_pdq.JOB_TYPE_trans_get, _pdq.JOB_TYPE_trans_set)
def __init__(self):
this = _pdq.new_JOB_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_JOB_TYPE
__del__ = lambda self : None;
JOB_TYPE_swigregister = _pdq.JOB_TYPE_swigregister
JOB_TYPE_swigregister(JOB_TYPE)
class NODE_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, NODE_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, NODE_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["devtype"] = _pdq.NODE_TYPE_devtype_set
__swig_getmethods__["devtype"] = _pdq.NODE_TYPE_devtype_get
if _newclass:devtype = _swig_property(_pdq.NODE_TYPE_devtype_get, _pdq.NODE_TYPE_devtype_set)
__swig_setmethods__["sched"] = _pdq.NODE_TYPE_sched_set
__swig_getmethods__["sched"] = _pdq.NODE_TYPE_sched_get
if _newclass:sched = _swig_property(_pdq.NODE_TYPE_sched_get, _pdq.NODE_TYPE_sched_set)
__swig_setmethods__["devname"] = _pdq.NODE_TYPE_devname_set
__swig_getmethods__["devname"] = _pdq.NODE_TYPE_devname_get
if _newclass:devname = _swig_property(_pdq.NODE_TYPE_devname_get, _pdq.NODE_TYPE_devname_set)
__swig_setmethods__["visits"] = _pdq.NODE_TYPE_visits_set
__swig_getmethods__["visits"] = _pdq.NODE_TYPE_visits_get
if _newclass:visits = _swig_property(_pdq.NODE_TYPE_visits_get, _pdq.NODE_TYPE_visits_set)
__swig_setmethods__["service"] = _pdq.NODE_TYPE_service_set
__swig_getmethods__["service"] = _pdq.NODE_TYPE_service_get
if _newclass:service = _swig_property(_pdq.NODE_TYPE_service_get, _pdq.NODE_TYPE_service_set)
__swig_setmethods__["demand"] = _pdq.NODE_TYPE_demand_set
__swig_getmethods__["demand"] = _pdq.NODE_TYPE_demand_get
if _newclass:demand = _swig_property(_pdq.NODE_TYPE_demand_get, _pdq.NODE_TYPE_demand_set)
__swig_setmethods__["resit"] = _pdq.NODE_TYPE_resit_set
__swig_getmethods__["resit"] = _pdq.NODE_TYPE_resit_get
if _newclass:resit = _swig_property(_pdq.NODE_TYPE_resit_get, _pdq.NODE_TYPE_resit_set)
__swig_setmethods__["utiliz"] = _pdq.NODE_TYPE_utiliz_set
__swig_getmethods__["utiliz"] = _pdq.NODE_TYPE_utiliz_get
if _newclass:utiliz = _swig_property(_pdq.NODE_TYPE_utiliz_get, _pdq.NODE_TYPE_utiliz_set)
__swig_setmethods__["qsize"] = _pdq.NODE_TYPE_qsize_set
__swig_getmethods__["qsize"] = _pdq.NODE_TYPE_qsize_get
if _newclass:qsize = _swig_property(_pdq.NODE_TYPE_qsize_get, _pdq.NODE_TYPE_qsize_set)
__swig_setmethods__["avqsize"] = _pdq.NODE_TYPE_avqsize_set
__swig_getmethods__["avqsize"] = _pdq.NODE_TYPE_avqsize_get
if _newclass:avqsize = _swig_property(_pdq.NODE_TYPE_avqsize_get, _pdq.NODE_TYPE_avqsize_set)
def __init__(self):
this = _pdq.new_NODE_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_NODE_TYPE
__del__ = lambda self : None;
NODE_TYPE_swigregister = _pdq.NODE_TYPE_swigregister
NODE_TYPE_swigregister(NODE_TYPE)
def CreateClosed(*args):
return _pdq.CreateClosed(*args)
CreateClosed = _pdq.CreateClosed
def CreateClosed_p(*args):
return _pdq.CreateClosed_p(*args)
CreateClosed_p = _pdq.CreateClosed_p
def CreateOpen(*args):
return _pdq.CreateOpen(*args)
CreateOpen = _pdq.CreateOpen
def CreateOpen_p(*args):
return _pdq.CreateOpen_p(*args)
CreateOpen_p = _pdq.CreateOpen_p
def CreateNode(*args):
return _pdq.CreateNode(*args)
CreateNode = _pdq.CreateNode
def CreateMultiNode(*args):
return _pdq.CreateMultiNode(*args)
CreateMultiNode = _pdq.CreateMultiNode
def GetStreamsCount():
return _pdq.GetStreamsCount()
GetStreamsCount = _pdq.GetStreamsCount
def GetNodesCount():
return _pdq.GetNodesCount()
GetNodesCount = _pdq.GetNodesCount
def GetResponse(*args):
return _pdq.GetResponse(*args)
GetResponse = _pdq.GetResponse
def GetResidenceTime(*args):
return _pdq.GetResidenceTime(*args)
GetResidenceTime = _pdq.GetResidenceTime
def GetThruput(*args):
return _pdq.GetThruput(*args)
GetThruput = _pdq.GetThruput
def GetLoadOpt(*args):
return _pdq.GetLoadOpt(*args)
GetLoadOpt = _pdq.GetLoadOpt
def GetUtilization(*args):
return _pdq.GetUtilization(*args)
GetUtilization = _pdq.GetUtilization
def GetQueueLength(*args):
return _pdq.GetQueueLength(*args)
GetQueueLength = _pdq.GetQueueLength
def PDQ_GetThruMax(*args):
return _pdq.PDQ_GetThruMax(*args)
PDQ_GetThruMax = _pdq.PDQ_GetThruMax
def Init(*args):
return _pdq.Init(*args)
Init = _pdq.Init
def Report():
return _pdq.Report()
Report = _pdq.Report
def SetDebug(*args):
return _pdq.SetDebug(*args)
SetDebug = _pdq.SetDebug
def SetDemand(*args):
return _pdq.SetDemand(*args)
SetDemand = _pdq.SetDemand
def SetDemand_p(*args):
return _pdq.SetDemand_p(*args)
SetDemand_p = _pdq.SetDemand_p
def SetVisits(*args):
return _pdq.SetVisits(*args)
SetVisits = _pdq.SetVisits
def SetVisits_p(*args):
return _pdq.SetVisits_p(*args)
SetVisits_p = _pdq.SetVisits_p
def Solve(*args):
return _pdq.Solve(*args)
Solve = _pdq.Solve
def SetWUnit(*args):
return _pdq.SetWUnit(*args)
SetWUnit = _pdq.SetWUnit
def SetTUnit(*args):
return _pdq.SetTUnit(*args)
SetTUnit = _pdq.SetTUnit
def SetComment(*args):
return _pdq.SetComment(*args)
SetComment = _pdq.SetComment
def GetComment():
return _pdq.GetComment()
GetComment = _pdq.GetComment
def PrintNodes():
return _pdq.PrintNodes()
PrintNodes = _pdq.PrintNodes
def GetNode(*args):
return _pdq.GetNode(*args)
GetNode = _pdq.GetNode
def getjob(*args):
return _pdq.getjob(*args)
getjob = _pdq.getjob
def resets(*args):
return _pdq.resets(*args)
resets = _pdq.resets
def debug(*args):
return _pdq.debug(*args)
debug = _pdq.debug
def errmsg(*args):
return _pdq.errmsg(*args)
errmsg = _pdq.errmsg
def approx():
return _pdq.approx()
approx = _pdq.approx
def canonical():
return _pdq.canonical()
canonical = _pdq.canonical
def exact():
return _pdq.exact()
exact = _pdq.exact
def getjob_index(*args):
return _pdq.getjob_index(*args)
getjob_index = _pdq.getjob_index
def getjob_name(*args):
return _pdq.getjob_name(*args)
getjob_name = _pdq.getjob_name
def getnode_index(*args):
return _pdq.getnode_index(*args)
getnode_index = _pdq.getnode_index
def typetostr(*args):
return _pdq.typetostr(*args)
typetostr = _pdq.typetostr
# This file is compatible with both classic and new-style classes.
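# Illustrative sketch (not part of the generated bindings): a tiny open
# queueing model built only from functions and constants wrapped above. The
# workload/node names, the 0.75 req/s arrival rate and the 0.4 s service
# demand are made-up values, and the argument order follows the standard PDQ
# API rather than anything checked here.
def _example_open_model():
    Init("example open network")
    CreateOpen("requests", 0.75)             # arrival rate (requests/second)
    CreateNode("webserver", CEN, FCFS)       # queueing center, FIFO discipline
    SetDemand("webserver", "requests", 0.4)  # service demand in seconds
    Solve(CANON)                             # canonical method for open networks
    Report()                                 # print the standard PDQ report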
| mit |
erdc-cm/air-water-vv | 2d/hydraulicStructures/broad_crested_weir/kappa_p.py | 2 | 1788 | from proteus.default_p import *
from proteus import Context
from proteus.mprans import Kappa
ct = Context.get()
domain = ct.domain
nd = domain.nd
LevelModelType = Kappa.LevelModel
if ct.useOnlyVF:
RD_model = None
LS_model = None
dissipation_model = 3
ME_model = 2
else:
RD_model = 3
LS_model = 2
ME_model = 5
dissipation_model = 6
#
dissipation_model_flag = 1
if ct.useRANS == 2:
dissipation_model_flag=2
coefficients = Kappa.Coefficients(V_model=0,
ME_model=ME_model,
LS_model=LS_model,
RD_model=RD_model,
dissipation_model=dissipation_model,
dissipation_model_flag=dissipation_model_flag,#1 -- K-epsilon, 2 -- K-omega
useMetrics=ct.useMetrics,
rho_0=ct.rho_0,nu_0=ct.nu_0,
rho_1=ct.rho_1,nu_1=ct.nu_1,
g=ct.g,
nd=ct.domain.nd,
c_mu=0.09,
sigma_k=1.0,
sc_uref=ct.kappa_sc_uref,
sc_beta=ct.kappa_sc_beta)
dirichletConditions = {0: lambda x, flag: domain.bc[flag].k_dirichlet.init_cython()}
advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.bc[flag].k_advective.init_cython()}
diffusiveFluxBoundaryConditions = {0: {0: lambda x, flag: domain.bc[flag].k_diffusive.init_cython()}}
class ConstantIC:
def __init__(self,cval=0.0):
self.cval=cval
def uOfXT(self,x,t):
return self.cval
# NOTE: kInflow is not defined anywhere in this file; it is assumed here to be
# supplied through the shared Context (ct.kInflow) set up by the main problem
# script.
initialConditions = {0: ConstantIC(cval=ct.kInflow*0.001)}
| mit |
ammaradil/fibonacci | Lib/site-packages/django/contrib/auth/password_validation.py | 57 | 7538 | from __future__ import unicode_literals
import gzip
import os
import re
from difflib import SequenceMatcher
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils.html import format_html
from django.utils.module_loading import import_string
from django.utils.six import string_types, text_type
from django.utils.translation import ugettext as _, ungettext
@lru_cache.lru_cache(maxsize=None)
def get_default_password_validators():
return get_password_validators(settings.AUTH_PASSWORD_VALIDATORS)
def get_password_validators(validator_config):
validators = []
for validator in validator_config:
try:
klass = import_string(validator['NAME'])
except ImportError:
msg = "The module in NAME could not be imported: %s. Check your AUTH_PASSWORD_VALIDATORS setting."
raise ImproperlyConfigured(msg % validator['NAME'])
validators.append(klass(**validator.get('OPTIONS', {})))
return validators
def validate_password(password, user=None, password_validators=None):
"""
Validate whether the password meets all validator requirements.
If the password is valid, return ``None``.
If the password is invalid, raise ValidationError with all error messages.
"""
errors = []
if password_validators is None:
password_validators = get_default_password_validators()
for validator in password_validators:
try:
validator.validate(password, user)
except ValidationError as error:
errors.append(error)
if errors:
raise ValidationError(errors)
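# Usage sketch: validate_password() returns None when every configured
# validator passes and raises a single ValidationError aggregating all
# failures otherwise. The password literal below is illustrative.
def _example_validate(password='hunter2', user=None):
    try:
        validate_password(password, user)
    except ValidationError as exc:
        return exc.messages  # e.g. ["This password is too short. ..."]
    return []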
def password_changed(password, user=None, password_validators=None):
"""
Inform all validators that have implemented a password_changed() method
that the password has been changed.
"""
if password_validators is None:
password_validators = get_default_password_validators()
for validator in password_validators:
password_changed = getattr(validator, 'password_changed', lambda *a: None)
password_changed(password, user)
def password_validators_help_texts(password_validators=None):
"""
Return a list of all help texts of all configured validators.
"""
help_texts = []
if password_validators is None:
password_validators = get_default_password_validators()
for validator in password_validators:
help_texts.append(validator.get_help_text())
return help_texts
def _password_validators_help_text_html(password_validators=None):
"""
Return an HTML string with all help texts of all configured validators
in an <ul>.
"""
help_texts = password_validators_help_texts(password_validators)
help_items = [format_html('<li>{}</li>', help_text) for help_text in help_texts]
return '<ul>%s</ul>' % ''.join(help_items) if help_items else ''
password_validators_help_text_html = lazy(_password_validators_help_text_html, text_type)
class MinimumLengthValidator(object):
"""
Validate whether the password is of a minimum length.
"""
def __init__(self, min_length=8):
self.min_length = min_length
def validate(self, password, user=None):
if len(password) < self.min_length:
raise ValidationError(
ungettext(
"This password is too short. It must contain at least %(min_length)d character.",
"This password is too short. It must contain at least %(min_length)d characters.",
self.min_length
),
code='password_too_short',
params={'min_length': self.min_length},
)
def get_help_text(self):
return ungettext(
"Your password must contain at least %(min_length)d character.",
"Your password must contain at least %(min_length)d characters.",
self.min_length
) % {'min_length': self.min_length}
class UserAttributeSimilarityValidator(object):
"""
Validate whether the password is sufficiently different from the user's
attributes.
If no specific attributes are provided, look at a sensible list of
defaults. Attributes that don't exist are ignored. Comparison is made to
not only the full attribute value, but also its components, so that, for
example, a password is validated against either part of an email address,
as well as the full address.
"""
DEFAULT_USER_ATTRIBUTES = ('username', 'first_name', 'last_name', 'email')
def __init__(self, user_attributes=DEFAULT_USER_ATTRIBUTES, max_similarity=0.7):
self.user_attributes = user_attributes
self.max_similarity = max_similarity
def validate(self, password, user=None):
if not user:
return
for attribute_name in self.user_attributes:
value = getattr(user, attribute_name, None)
if not value or not isinstance(value, string_types):
continue
value_parts = re.split('\W+', value) + [value]
for value_part in value_parts:
if SequenceMatcher(a=password.lower(), b=value_part.lower()).quick_ratio() > self.max_similarity:
verbose_name = force_text(user._meta.get_field(attribute_name).verbose_name)
raise ValidationError(
_("The password is too similar to the %(verbose_name)s."),
code='password_too_similar',
params={'verbose_name': verbose_name},
)
def get_help_text(self):
return _("Your password can't be too similar to your other personal information.")
class CommonPasswordValidator(object):
"""
Validate whether the password is a common password.
The password is rejected if it occurs in a provided list, which may be gzipped.
The list Django ships with contains 1000 common passwords, created by Mark Burnett:
https://xato.net/passwords/more-top-worst-passwords/
"""
DEFAULT_PASSWORD_LIST_PATH = os.path.join(
os.path.dirname(os.path.realpath(upath(__file__))), 'common-passwords.txt.gz'
)
def __init__(self, password_list_path=DEFAULT_PASSWORD_LIST_PATH):
try:
common_passwords_lines = gzip.open(password_list_path).read().decode('utf-8').splitlines()
except IOError:
with open(password_list_path) as f:
common_passwords_lines = f.readlines()
self.passwords = {p.strip() for p in common_passwords_lines}
def validate(self, password, user=None):
if password.lower().strip() in self.passwords:
raise ValidationError(
_("This password is too common."),
code='password_too_common',
)
def get_help_text(self):
return _("Your password can't be a commonly used password.")
class NumericPasswordValidator(object):
"""
    Validate that the password is not entirely numeric.
"""
def validate(self, password, user=None):
if password.isdigit():
raise ValidationError(
_("This password is entirely numeric."),
code='password_entirely_numeric',
)
def get_help_text(self):
return _("Your password can't be entirely numeric.")
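# Configuration sketch: these validators are enabled through the
# AUTH_PASSWORD_VALIDATORS setting consumed by get_password_validators().
# The dotted paths assume this module's location in Django
# (django.contrib.auth.password_validation); OPTIONS keys mirror each
# validator's __init__ arguments.
#
# AUTH_PASSWORD_VALIDATORS = [
#     {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
#     {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
#      'OPTIONS': {'min_length': 10}},
#     {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
#     {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
# ]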
| mit |
lthurlow/Boolean-Constrained-Routing | networkx-1.8.1/build/lib/networkx/algorithms/components/tests/test_biconnected.py | 35 | 6168 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
from networkx.algorithms.components import biconnected
def assert_components_equal(x,y):
sx = set((frozenset([frozenset(e) for e in c]) for c in x))
sy = set((frozenset([frozenset(e) for e in c]) for c in y))
assert_equal(sx,sy)
def test_barbell():
G=nx.barbell_graph(8,4)
G.add_path([7,20,21,22])
G.add_cycle([22,23,24,25])
pts=set(biconnected.articulation_points(G))
assert_equal(pts,set([7,8,9,10,11,12,20,21,22]))
answer = [set([12, 13, 14, 15, 16, 17, 18, 19]),
set([0, 1, 2, 3, 4, 5, 6, 7]),
set([22, 23, 24, 25]),
set([11, 12]),
set([10, 11]),
set([9, 10]),
set([8, 9]),
set([7, 8]),
set([21, 22]),
set([20, 21]),
set([7, 20])]
bcc=list(biconnected.biconnected_components(G))
bcc.sort(key=len, reverse=True)
assert_equal(bcc,answer)
G.add_edge(2,17)
pts=set(biconnected.articulation_points(G))
assert_equal(pts,set([7,20,21,22]))
def test_articulation_points_cycle():
G=nx.cycle_graph(3)
G.add_cycle([1,3,4])
pts=set(biconnected.articulation_points(G))
assert_equal(pts,set([1]))
def test_is_biconnected():
G=nx.cycle_graph(3)
assert_true(biconnected.is_biconnected(G))
G.add_cycle([1,3,4])
assert_false(biconnected.is_biconnected(G))
def test_empty_is_biconnected():
G=nx.empty_graph(5)
assert_false(biconnected.is_biconnected(G))
G.add_edge(0,1)
assert_false(biconnected.is_biconnected(G))
def test_biconnected_components_cycle():
G=nx.cycle_graph(3)
G.add_cycle([1,3,4])
pts = set(map(frozenset,biconnected.biconnected_components(G)))
assert_equal(pts,set([frozenset([0,1,2]),frozenset([1,3,4])]))
def test_biconnected_component_subgraphs_cycle():
G=nx.cycle_graph(3)
G.add_cycle([1,3,4,5])
G.add_edge(1,3,eattr='red') # test copying of edge data
G.node[1]['nattr']='blue'
G.graph['gattr']='green'
Gc = set(biconnected.biconnected_component_subgraphs(G))
assert_equal(len(Gc),2)
g1,g2=Gc
if 0 in g1:
assert_true(nx.is_isomorphic(g1,nx.Graph([(0,1),(0,2),(1,2)])))
assert_true(nx.is_isomorphic(g2,nx.Graph([(1,3),(1,5),(3,4),(4,5)])))
assert_equal(g2[1][3]['eattr'],'red')
assert_equal(g2.node[1]['nattr'],'blue')
assert_equal(g2.graph['gattr'],'green')
g2[1][3]['eattr']='blue'
assert_equal(g2[1][3]['eattr'],'blue')
assert_equal(G[1][3]['eattr'],'red')
else:
assert_true(nx.is_isomorphic(g1,nx.Graph([(1,3),(1,5),(3,4),(4,5)])))
assert_true(nx.is_isomorphic(g2,nx.Graph([(0,1),(0,2),(1,2)])))
assert_equal(g1[1][3]['eattr'],'red')
assert_equal(g1.node[1]['nattr'],'blue')
assert_equal(g1.graph['gattr'],'green')
g1[1][3]['eattr']='blue'
assert_equal(g1[1][3]['eattr'],'blue')
assert_equal(G[1][3]['eattr'],'red')
def test_biconnected_components1():
# graph example from
# http://www.ibluemojo.com/school/articul_algorithm.html
edges=[(0,1),
(0,5),
(0,6),
(0,14),
(1,5),
(1,6),
(1,14),
(2,4),
(2,10),
(3,4),
(3,15),
(4,6),
(4,7),
(4,10),
(5,14),
(6,14),
(7,9),
(8,9),
(8,12),
(8,13),
(10,15),
(11,12),
(11,13),
(12,13)]
G=nx.Graph(edges)
pts = set(biconnected.articulation_points(G))
assert_equal(pts,set([4,6,7,8,9]))
comps = list(biconnected.biconnected_component_edges(G))
answer = [
[(3,4),(15,3),(10,15),(10,4),(2,10),(4,2)],
[(13,12),(13,8),(11,13),(12,11),(8,12)],
[(9,8)],
[(7,9)],
[(4,7)],
[(6,4)],
[(14,0),(5,1),(5,0),(14,5),(14,1),(6,14),(6,0),(1,6),(0,1)],
]
assert_components_equal(comps,answer)
def test_biconnected_components2():
G=nx.Graph()
G.add_cycle('ABC')
G.add_cycle('CDE')
G.add_cycle('FIJHG')
G.add_cycle('GIJ')
G.add_edge('E','G')
comps = list(biconnected.biconnected_component_edges(G))
answer = [
[tuple('GF'),tuple('FI'),tuple('IG'),tuple('IJ'),tuple('JG'),tuple('JH'),tuple('HG')],
[tuple('EG')],
[tuple('CD'),tuple('DE'),tuple('CE')],
[tuple('AB'),tuple('BC'),tuple('AC')]
]
assert_components_equal(comps,answer)
def test_biconnected_davis():
D = nx.davis_southern_women_graph()
bcc = list(biconnected.biconnected_components(D))[0]
assert_true(set(D) == bcc) # All nodes in a giant bicomponent
# So no articulation points
assert_equal(list(biconnected.articulation_points(D)),[])
def test_biconnected_karate():
K = nx.karate_club_graph()
answer = [set([0, 1, 2, 3, 7, 8, 9, 12, 13, 14, 15, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]),
set([0, 4, 5, 6, 10, 16]),
set([0, 11])]
bcc = list(biconnected.biconnected_components(K))
bcc.sort(key=len, reverse=True)
assert_true(list(biconnected.biconnected_components(K)) == answer)
assert_equal(list(biconnected.articulation_points(K)),[0])
def test_biconnected_eppstein():
# tests from http://www.ics.uci.edu/~eppstein/PADS/Biconnectivity.py
G1 = nx.Graph({
0: [1,2,5],
1: [0,5],
2: [0,3,4],
3: [2,4,5,6],
4: [2,3,5,6],
5: [0,1,3,4],
6: [3,4]})
G2 = nx.Graph({
0: [2,5],
1: [3,8],
2: [0,3,5],
3: [1,2,6,8],
4: [7],
5: [0,2],
6: [3,8],
7: [4],
8: [1,3,6]})
assert_true(biconnected.is_biconnected(G1))
assert_false(biconnected.is_biconnected(G2))
answer_G2 = [set([1, 3, 6, 8]), set([0, 2, 5]), set([2, 3]), set([4, 7])]
bcc = list(biconnected.biconnected_components(G2))
bcc.sort(key=len, reverse=True)
assert_equal(bcc, answer_G2)
| mit |
evanson/yowsup | yowsup/common/tools.py | 30 | 4444 | import time,datetime,re, hashlib
from dateutil import tz
import os
from .constants import YowConstants
import codecs, sys
import logging
import tempfile
import base64
import hashlib
logger = logging.getLogger(__name__)
class HexTools:
decode_hex = codecs.getdecoder("hex_codec")
@staticmethod
def decodeHex(hexString):
result = HexTools.decode_hex(hexString)[0]
if sys.version_info >= (3,0):
result = result.decode('latin-1')
return result
class WATools:
@staticmethod
def generateIdentity():
return os.urandom(20)
@staticmethod
def getFileHashForUpload(filePath):
sha1 = hashlib.sha256()
f = open(filePath, 'rb')
try:
sha1.update(f.read())
finally:
f.close()
b64Hash = base64.b64encode(sha1.digest())
return b64Hash if type(b64Hash) is str else b64Hash.decode()
class StorageTools:
@staticmethod
def constructPath(*path):
path = os.path.join(*path)
fullPath = os.path.expanduser(os.path.join(YowConstants.PATH_STORAGE, path))
if not os.path.exists(os.path.dirname(fullPath)):
os.makedirs(os.path.dirname(fullPath))
return fullPath
@staticmethod
def getStorageForPhone(phone):
return StorageTools.constructPath(phone + '/')
@staticmethod
def writeIdentity(phone, identity):
path = StorageTools.getStorageForPhone(phone)
with open(os.path.join(path, "id"), 'wb') as idFile:
idFile.write(identity)
@staticmethod
def getIdentity(phone):
path = StorageTools.getStorageForPhone(phone)
out = None
idPath = os.path.join(path, "id")
if os.path.isfile(idPath):
with open(idPath, 'rb') as idFile:
out = idFile.read()
return out
@staticmethod
def writeNonce(phone, nonce):
path = StorageTools.getStorageForPhone(phone)
with open(os.path.join(path, "nonce"), 'wb') as idFile:
idFile.write(nonce.encode("latin-1") if sys.version_info >= (3,0) else nonce)
@staticmethod
def getNonce(phone):
path = StorageTools.getStorageForPhone(phone)
out = None
noncePath = os.path.join(path, "nonce")
if os.path.isfile(noncePath):
with open(noncePath, 'rb') as idFile:
out = idFile.read()
return out
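# Usage sketch (hypothetical phone number): identity and nonce values
# round-trip through per-phone files under YowConstants.PATH_STORAGE.
def _example_storage_roundtrip(phone="491234567890"):
    identity = WATools.generateIdentity()  # 20 random bytes
    StorageTools.writeIdentity(phone, identity)
    return StorageTools.getIdentity(phone) == identity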
class TimeTools:
@staticmethod
def parseIso(iso):
d=datetime.datetime(*map(int, re.split('[^\d]', iso)[:-1]))
return d
@staticmethod
def utcToLocal(dt):
utc = tz.gettz('UTC')
local = tz.tzlocal()
dtUtc = dt.replace(tzinfo=utc)
return dtUtc.astimezone(local)
@staticmethod
def utcTimestamp():
#utc = tz.gettz('UTC')
utcNow = datetime.datetime.utcnow()
return TimeTools.datetimeToTimestamp(utcNow)
@staticmethod
def datetimeToTimestamp(dt):
return time.mktime(dt.timetuple())
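# Usage sketch: parse an ISO-style timestamp and convert it to the local
# timezone; the timestamp literal is illustrative.
def _example_time_conversion():
    dt_utc = TimeTools.parseIso("2015-06-01T12:30:45.000Z")
    dt_local = TimeTools.utcToLocal(dt_utc)
    return TimeTools.datetimeToTimestamp(dt_local)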
class ModuleTools:
@staticmethod
def INSTALLED_PIL():
try:
import PIL
return True
except ImportError:
return False
@staticmethod
def INSTALLED_AXOLOTL():
try:
import axolotl
return True
except ImportError:
return False
class ImageTools:
@staticmethod
def scaleImage(infile, outfile, imageFormat, width, height):
if ModuleTools.INSTALLED_PIL():
from PIL import Image
im = Image.open(infile)
im.thumbnail((width, height))
im.save(outfile, imageFormat)
return True
else:
logger.warn("Python PIL library not installed")
return False
@staticmethod
def getImageDimensions(imageFile):
if ModuleTools.INSTALLED_PIL():
from PIL import Image
im = Image.open(imageFile)
return im.size
else:
logger.warn("Python PIL library not installed")
@staticmethod
def generatePreviewFromImage(image):
fd, path = tempfile.mkstemp()
fileObj = os.fdopen(fd, "rb+")
preview = None
if ImageTools.scaleImage(image, fileObj, "JPEG", YowConstants.PREVIEW_WIDTH, YowConstants.PREVIEW_HEIGHT):
fileObj.seek(0)
preview = fileObj.read()
fileObj.close()
        return preview
| gpl-3.0 |
orekyuu/intellij-community | python/helpers/profiler/thrift/transport/TTwisted.py | 97 | 10563 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import struct
from cStringIO import StringIO
from zope.interface import implements, Interface, Attribute
from twisted.internet.protocol import ServerFactory, ClientFactory, \
connectionDone
from twisted.internet import defer
from twisted.internet.threads import deferToThread
from twisted.protocols import basic
from twisted.web import server, resource, http
from thrift.transport import TTransport
class TMessageSenderTransport(TTransport.TTransportBase):
def __init__(self):
self.__wbuf = StringIO()
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
msg = self.__wbuf.getvalue()
self.__wbuf = StringIO()
return self.sendMessage(msg)
def sendMessage(self, message):
raise NotImplementedError
class TCallbackTransport(TMessageSenderTransport):
def __init__(self, func):
TMessageSenderTransport.__init__(self)
self.func = func
def sendMessage(self, message):
return self.func(message)
class ThriftClientProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self._client_class = client_class
self._iprot_factory = iprot_factory
if oprot_factory is None:
self._oprot_factory = iprot_factory
else:
self._oprot_factory = oprot_factory
self.recv_map = {}
self.started = defer.Deferred()
def dispatch(self, msg):
self.sendString(msg)
def connectionMade(self):
tmo = TCallbackTransport(self.dispatch)
self.client = self._client_class(tmo, self._oprot_factory)
self.started.callback(self.client)
def connectionLost(self, reason=connectionDone):
for k, v in self.client._reqs.iteritems():
tex = TTransport.TTransportException(
type=TTransport.TTransportException.END_OF_FILE,
message='Connection closed')
v.errback(tex)
def stringReceived(self, frame):
tr = TTransport.TMemoryBuffer(frame)
iprot = self._iprot_factory.getProtocol(tr)
(fname, mtype, rseqid) = iprot.readMessageBegin()
try:
method = self.recv_map[fname]
except KeyError:
method = getattr(self.client, 'recv_' + fname)
self.recv_map[fname] = method
method(iprot, mtype, rseqid)
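# Connection sketch: a ThriftClientProtocol is normally built through
# Twisted's ClientCreator. `MyService` stands in for a generated Thrift
# service module and is not defined here.
#
#   from twisted.internet import reactor
#   from twisted.internet.protocol import ClientCreator
#   from thrift.protocol import TBinaryProtocol
#
#   d = ClientCreator(reactor, ThriftClientProtocol, MyService.Client,
#                     TBinaryProtocol.TBinaryProtocolFactory()
#                     ).connectTCP('localhost', 9090)
#   d.addCallback(lambda proto: proto.client)  # ready once connectionMade ran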
class ThriftSASLClientProtocol(ThriftClientProtocol):
START = 1
OK = 2
BAD = 3
ERROR = 4
COMPLETE = 5
MAX_LENGTH = 2 ** 31 - 1
def __init__(self, client_class, iprot_factory, oprot_factory=None,
host=None, service=None, mechanism='GSSAPI', **sasl_kwargs):
"""
host: the name of the server, from a SASL perspective
service: the name of the server's service, from a SASL perspective
mechanism: the name of the preferred mechanism to use
All other kwargs will be passed to the puresasl.client.SASLClient
constructor.
"""
from puresasl.client import SASLClient
        self.SASLClient = SASLClient
ThriftClientProtocol.__init__(self, client_class, iprot_factory, oprot_factory)
self._sasl_negotiation_deferred = None
self._sasl_negotiation_status = None
self.client = None
if host is not None:
self.createSASLClient(host, service, mechanism, **sasl_kwargs)
def createSASLClient(self, host, service, mechanism, **kwargs):
self.sasl = self.SASLClient(host, service, mechanism, **kwargs)
def dispatch(self, msg):
encoded = self.sasl.wrap(msg)
len_and_encoded = ''.join((struct.pack('!i', len(encoded)), encoded))
ThriftClientProtocol.dispatch(self, len_and_encoded)
@defer.inlineCallbacks
def connectionMade(self):
self._sendSASLMessage(self.START, self.sasl.mechanism)
initial_message = yield deferToThread(self.sasl.process)
self._sendSASLMessage(self.OK, initial_message)
while True:
status, challenge = yield self._receiveSASLMessage()
if status == self.OK:
response = yield deferToThread(self.sasl.process, challenge)
self._sendSASLMessage(self.OK, response)
elif status == self.COMPLETE:
if not self.sasl.complete:
msg = "The server erroneously indicated that SASL " \
"negotiation was complete"
raise TTransport.TTransportException(msg, message=msg)
else:
break
else:
msg = "Bad SASL negotiation status: %d (%s)" % (status, challenge)
raise TTransport.TTransportException(msg, message=msg)
self._sasl_negotiation_deferred = None
ThriftClientProtocol.connectionMade(self)
def _sendSASLMessage(self, status, body):
if body is None:
body = ""
header = struct.pack(">BI", status, len(body))
self.transport.write(header + body)
def _receiveSASLMessage(self):
self._sasl_negotiation_deferred = defer.Deferred()
self._sasl_negotiation_status = None
return self._sasl_negotiation_deferred
def connectionLost(self, reason=connectionDone):
if self.client:
ThriftClientProtocol.connectionLost(self, reason)
def dataReceived(self, data):
if self._sasl_negotiation_deferred:
# we got a sasl challenge in the format (status, length, challenge)
# save the status, let IntNStringReceiver piece the challenge data together
self._sasl_negotiation_status, = struct.unpack("B", data[0])
ThriftClientProtocol.dataReceived(self, data[1:])
else:
# normal frame, let IntNStringReceiver piece it together
ThriftClientProtocol.dataReceived(self, data)
def stringReceived(self, frame):
if self._sasl_negotiation_deferred:
# the frame is just a SASL challenge
response = (self._sasl_negotiation_status, frame)
self._sasl_negotiation_deferred.callback(response)
else:
# there's a second 4 byte length prefix inside the frame
decoded_frame = self.sasl.unwrap(frame[4:])
ThriftClientProtocol.stringReceived(self, decoded_frame)
class ThriftServerProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def dispatch(self, msg):
self.sendString(msg)
def processError(self, error):
self.transport.loseConnection()
def processOk(self, _, tmo):
msg = tmo.getvalue()
if len(msg) > 0:
self.dispatch(msg)
def stringReceived(self, frame):
tmi = TTransport.TMemoryBuffer(frame)
tmo = TTransport.TMemoryBuffer()
iprot = self.factory.iprot_factory.getProtocol(tmi)
oprot = self.factory.oprot_factory.getProtocol(tmo)
d = self.factory.processor.process(iprot, oprot)
d.addCallbacks(self.processOk, self.processError,
callbackArgs=(tmo,))
class IThriftServerFactory(Interface):
processor = Attribute("Thrift processor")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class IThriftClientFactory(Interface):
client_class = Attribute("Thrift client class")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class ThriftServerFactory(ServerFactory):
implements(IThriftServerFactory)
protocol = ThriftServerProtocol
def __init__(self, processor, iprot_factory, oprot_factory=None):
self.processor = processor
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
class ThriftClientFactory(ClientFactory):
implements(IThriftClientFactory)
protocol = ThriftClientProtocol
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self.client_class = client_class
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
def buildProtocol(self, addr):
p = self.protocol(self.client_class, self.iprot_factory,
self.oprot_factory)
p.factory = self
return p
class ThriftResource(resource.Resource):
allowedMethods = ('POST',)
def __init__(self, processor, inputProtocolFactory,
outputProtocolFactory=None):
resource.Resource.__init__(self)
self.inputProtocolFactory = inputProtocolFactory
if outputProtocolFactory is None:
self.outputProtocolFactory = inputProtocolFactory
else:
self.outputProtocolFactory = outputProtocolFactory
self.processor = processor
def getChild(self, path, request):
return self
def _cbProcess(self, _, request, tmo):
msg = tmo.getvalue()
request.setResponseCode(http.OK)
request.setHeader("content-type", "application/x-thrift")
request.write(msg)
request.finish()
def render_POST(self, request):
request.content.seek(0, 0)
data = request.content.read()
tmi = TTransport.TMemoryBuffer(data)
tmo = TTransport.TMemoryBuffer()
iprot = self.inputProtocolFactory.getProtocol(tmi)
oprot = self.outputProtocolFactory.getProtocol(tmo)
d = self.processor.process(iprot, oprot)
d.addCallback(self._cbProcess, request, tmo)
return server.NOT_DONE_YET
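# Serving sketch: ThriftResource plugs into a regular twisted.web site. The
# processor instance and port number below are placeholders.
#
#   from thrift.protocol import TBinaryProtocol
#   from twisted.internet import reactor
#
#   root = ThriftResource(processor, TBinaryProtocol.TBinaryProtocolFactory())
#   reactor.listenTCP(8080, server.Site(root))
#   reactor.run()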
| apache-2.0 |
apigee/edx-platform | common/test/bok_choy/edxapp_pages/lms/progress.py | 4 | 3949 | from bok_choy.page_object import PageObject
from ..lms import BASE_URL
class ProgressPage(PageObject):
"""
Student progress page.
"""
@property
def name(self):
return "lms.progress"
@property
def requirejs(self):
return []
@property
def js_globals(self):
return []
def url(self, course_id=None):
return BASE_URL + "/courses/" + course_id + "/progress"
def is_browser_on_page(self):
has_course_info = self.is_css_present('section.course-info')
has_graph = self.is_css_present('div#grade-detail-graph')
return has_course_info and has_graph
def scores(self, chapter, section):
"""
Return a list of (points, max_points) tuples representing the scores
for the section.
Example:
        scores('Week 1', 'Lesson 1') --> [(2, 4), (0, 1)]
Returns `None` if no such chapter and section can be found.
"""
# Find the index of the section in the chapter
chapter_index = self._chapter_index(chapter)
if chapter_index is None:
return None
section_index = self._section_index(chapter_index, section)
if section_index is None:
return None
# Retrieve the scores for the section
return self._section_scores(chapter_index, section_index)
def _chapter_index(self, title):
"""
Return the CSS index of the chapter with `title`.
Returns `None` if it cannot find such a chapter.
"""
chapter_css = 'ol.chapters li h2'
chapter_titles = self.css_map(chapter_css, lambda el: el.text.lower().strip())
try:
# CSS indices are 1-indexed, so add one to the list index
return chapter_titles.index(title.lower()) + 1
except ValueError:
self.warning("Could not find chapter '{0}'".format(title))
return None
def _section_index(self, chapter_index, title):
"""
Return the CSS index of the section with `title` in the chapter at `chapter_index`.
Returns `None` if it can't find such a section.
"""
# This is a hideous CSS selector that means:
# Get the links containing the section titles in `chapter_index`.
# The link text is the section title.
section_css = 'ol.chapters>li:nth-of-type({0}) ol.sections li h3 a'.format(chapter_index)
section_titles = self.css_map(section_css, lambda el: el.text.lower().strip())
# The section titles also contain "n of m possible points" on the second line
# We have to remove this to find the right title
section_titles = [title.split('\n')[0] for title in section_titles]
# Some links are blank, so remove them
section_titles = [title for title in section_titles if title]
try:
# CSS indices are 1-indexed, so add one to the list index
return section_titles.index(title.lower()) + 1
except ValueError:
self.warning("Could not find section '{0}'".format(title))
return None
def _section_scores(self, chapter_index, section_index):
"""
Return a list of `(points, max_points)` tuples representing
the scores in the specified chapter and section.
`chapter_index` and `section_index` start at 1.
"""
        # This CSS selector means:
# Get the scores for the chapter at `chapter_index` and the section at `section_index`
# Example text of the retrieved elements: "0/1"
score_css = "ol.chapters>li:nth-of-type({0}) ol.sections>li:nth-of-type({1}) section.scores>ol>li".format(
chapter_index, section_index
)
text_scores = self.css_text(score_css)
# Convert text scores to tuples of (points, max_points)
return [tuple(map(int, score.split('/'))) for score in text_scores]
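# Usage sketch: once this page has been loaded by a bok_choy test (the
# harness details depend on the surrounding framework and are omitted),
# scores() returns the per-problem results for a section, e.g.
#   progress_page.scores('Week 1', 'Lesson 1')  ->  [(2, 4), (0, 1)]
# or None when the chapter or section title cannot be found.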
| agpl-3.0 |
OriHoch/Open-Knesset | agendas/forms.py | 14 | 5171 | from django import forms
from django.forms import ModelForm
from django.forms.formsets import formset_factory
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from models import (Agenda, AgendaVote, UserSuggestedVote,
AGENDAVOTE_SCORE_CHOICES,
IMPORTANCE_CHOICES)
class H4(forms.Widget):
""" used to display header fields """
input_type = None # Subclasses must define this.
def render(self, name, value, attrs=None):
return mark_safe(u'<h4>%s</h4>' % value)
class EditAgendaForm(forms.Form):
name = forms.CharField(max_length=300,
label=_(u'Agenda name'),
error_messages={'required': _('Please enter an agenda name'),
'max_length': _('Agenda name must be shorter than 300 characters')})
public_owner_name = forms.CharField(max_length=100,
label=_(u'Public owner name'),
error_messages={'required': _('Please enter a public owner name'),
'max_length': _('Public owner name must be shorter than 100 characters')})
description = forms.CharField(min_length=15,
label=_(u'Agenda description'),
error_messages={'required': _('Please enter a description for this agenda'),
'min_length': _('Agenda description must be at least 15 characters long')},
widget=forms.Textarea)
def __init__(self, agenda=None, *args, **kwargs):
super(EditAgendaForm, self).__init__(*args, **kwargs)
self.agenda = agenda
if self.agenda is not None:
self.initial = {'name': self.agenda.name,
'public_owner_name': self.agenda.public_owner_name,
'description': self.agenda.description,
}
class AddAgendaForm(ModelForm):
# to have the same names and help texts as the edit form, we need to override the form fields definitions:
name = forms.CharField(max_length=300,
label=_(u'Agenda name'),
error_messages={'required': _('Please enter an agenda name'),
'max_length': _('Agenda name must be shorter than 300 characters')})
public_owner_name = forms.CharField(max_length=100,
label=_(u'Public owner name'),
error_messages={'required': _('Please enter a public owner name'),
'max_length': _('Public owner name must be shorter than 100 characters')})
description = forms.CharField(min_length=15,
label=_(u'Agenda description'),
error_messages={'required': _('Please enter a description for this agenda'),
'min_length': _('Agenda description must be at least 15 characters long')},
widget=forms.Textarea)
class Meta:
model = Agenda
fields = ('name', 'public_owner_name', 'description')
class MeetingLinkingForm(forms.Form):
# a form to help agendas' editors tie meetings to agendas
agenda_name = forms.CharField(widget=H4, required=False, label='')
obj_id = forms.IntegerField(widget=forms.HiddenInput)
agenda_id = forms.IntegerField(widget=forms.HiddenInput)
weight = forms.TypedChoiceField(label=_('Importance'),
choices=IMPORTANCE_CHOICES,
required=False,
widget=forms.Select)
reasoning = forms.CharField(required=False, max_length=1000,
label=_(u'Reasoning'),
widget = forms.Textarea(attrs={'cols':30, 'rows':5}),
)
object_type = forms.CharField(widget=forms.HiddenInput)
def clean_weight(self):
data = self.cleaned_data['weight']
if data=="":
return 99
return data
def clean(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('weight') == 99:
cleaned_data["DELETE"] = 'on'
return cleaned_data
class VoteLinkingForm(MeetingLinkingForm):
weight = forms.TypedChoiceField(label=_('Position'), choices=AGENDAVOTE_SCORE_CHOICES,
required=False, widget=forms.Select)
importance = forms.TypedChoiceField(label=_('Importance'),
choices=IMPORTANCE_CHOICES,
required=False,
widget=forms.Select)
VoteLinkingFormSet = formset_factory(VoteLinkingForm, extra=0, can_delete=True)
MeetingLinkingFormSet = formset_factory(MeetingLinkingForm, extra=0,
can_delete=True)
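# Usage sketch (hypothetical ids and labels): the formsets are fed a list of
# initial dicts, one per existing agenda/vote link.
def _example_vote_formset():
    return VoteLinkingFormSet(initial=[{
        'agenda_name': u'Some agenda',
        'obj_id': 17,            # vote id
        'agenda_id': 3,
        'weight': 1,
        'importance': 1,
        'reasoning': u'',
        'object_type': 'vote',
    }])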
| bsd-3-clause |
Yelp/paasta | tests/cli/test_cmds_list_deploy_queue.py | 1 | 3659 | #!/usr/bin/env python
# Copyright 2015-2020 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import pytest
from paasta_tools.cli.cmds.list_deploy_queue import list_deploy_queue
from paasta_tools.paastaapi import ApiException
from paasta_tools.paastaapi.models import DeployQueue
from paasta_tools.paastaapi.models import DeployQueueServiceInstance
@pytest.fixture(autouse=True)
def mock_load_system_paasta_config():
with mock.patch(
"paasta_tools.cli.cmds.list_deploy_queue.load_system_paasta_config",
autospec=True,
):
yield
@pytest.fixture(autouse=True)
def mock_list_clusters():
with mock.patch(
"paasta_tools.cli.cmds.list_deploy_queue.list_clusters", autospec=True,
) as _mock_list_clusters:
_mock_list_clusters.return_value = ["westeros-prod"]
yield
@pytest.fixture()
def mock_api():
with mock.patch(
"paasta_tools.cli.cmds.list_deploy_queue.get_paasta_oapi_client", autospec=True,
) as m:
yield m.return_value
def test_list_deploy_queue(mock_api, capfd):
args = mock.Mock(cluster="westeros-prod", json=False)
mock_api.default.deploy_queue.return_value = DeployQueue(
available_service_instances=[
DeployQueueServiceInstance(
service="service1",
instance="instance1",
watcher="watcher1",
bounce_by=1578038400.0,
wait_until=1578038400.0,
enqueue_time=1578038400.0,
bounce_start_time=1578038400.0,
failures=0,
processed_count=0,
),
],
unavailable_service_instances=[
DeployQueueServiceInstance(
service="service2",
instance="instance2",
watcher="watcher2",
bounce_by=1577952000.0,
wait_until=1577952000.0,
enqueue_time=1577952000.0,
bounce_start_time=1577952000.0,
failures=5,
processed_count=10,
),
],
)
return_value = list_deploy_queue(args)
assert return_value == 0
stdout, stderr = capfd.readouterr()
lines = stdout.split("\n")
assert args.cluster in lines[0]
assert "service1.instance1" in lines[3]
assert "service2.instance2" in lines[6]
def test_list_deploy_queue_json(mock_api, capfd):
args = mock.Mock(cluster="westeros-prod", json=True)
mock_api.default.deploy_queue.return_value = DeployQueue(
available_service_instances=[], unavailable_service_instances=[],
)
return_value = list_deploy_queue(args)
assert return_value == 0
stdout, stderr = capfd.readouterr()
assert stdout.strip() == json.dumps(
{"available_service_instances": [], "unavailable_service_instances": []}
)
def test_http_error(mock_api):
args = mock.Mock(cluster="westeros-prod")
mock_api.api_error = ApiException
mock_api.default.deploy_queue.side_effect = ApiException(
status=500, reason="Internal Server Error"
)
assert list_deploy_queue(args) == 500
| apache-2.0 |
sudheesh001/oh-mainline | vendor/packages/Django/django/core/management/commands/dumpdata.py | 125 | 9088 | from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core import serializers
from django.db import router, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from optparse import make_option
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--format', default='json', dest='format',
help='Specifies the output serialization format for fixtures.'),
make_option('--indent', default=None, dest='indent', type='int',
help='Specifies the indent level to use when pretty-printing output'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a specific database to dump '
'fixtures from. Defaults to the "default" database.'),
make_option('-e', '--exclude', dest='exclude',action='append', default=[],
help='An appname or appname.ModelName to exclude (use multiple --exclude to exclude multiple apps/models).'),
make_option('-n', '--natural', action='store_true', dest='use_natural_keys', default=False,
help='Use natural keys if they are available.'),
make_option('-a', '--all', action='store_true', dest='use_base_manager', default=False,
help="Use Django's base manager to dump all models stored in the database, including those that would otherwise be filtered or modified by a custom manager."),
)
help = ("Output the contents of the database as a fixture of the given "
"format (using each model's default manager unless --all is "
"specified).")
args = '[appname appname.ModelName ...]'
def handle(self, *app_labels, **options):
from django.db.models import get_app, get_apps, get_model
format = options.get('format')
indent = options.get('indent')
using = options.get('database')
excludes = options.get('exclude')
show_traceback = options.get('traceback')
use_natural_keys = options.get('use_natural_keys')
use_base_manager = options.get('use_base_manager')
excluded_apps = set()
excluded_models = set()
for exclude in excludes:
if '.' in exclude:
app_label, model_name = exclude.split('.', 1)
model_obj = get_model(app_label, model_name)
if not model_obj:
raise CommandError('Unknown model in excludes: %s' % exclude)
excluded_models.add(model_obj)
else:
try:
app_obj = get_app(exclude)
excluded_apps.add(app_obj)
except ImproperlyConfigured:
raise CommandError('Unknown app in excludes: %s' % exclude)
if len(app_labels) == 0:
app_list = SortedDict((app, None) for app in get_apps() if app not in excluded_apps)
else:
app_list = SortedDict()
for label in app_labels:
try:
app_label, model_label = label.split('.')
try:
app = get_app(app_label)
except ImproperlyConfigured:
raise CommandError("Unknown application: %s" % app_label)
if app in excluded_apps:
continue
model = get_model(app_label, model_label)
if model is None:
raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
if app in app_list.keys():
if app_list[app] and model not in app_list[app]:
app_list[app].append(model)
else:
app_list[app] = [model]
except ValueError:
# This is just an app - no model qualifier
app_label = label
try:
app = get_app(app_label)
except ImproperlyConfigured:
raise CommandError("Unknown application: %s" % app_label)
if app in excluded_apps:
continue
app_list[app] = None
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
if format not in serializers.get_public_serializer_formats():
raise CommandError("Unknown serialization format: %s" % format)
try:
serializers.get_serializer(format)
except KeyError:
raise CommandError("Unknown serialization format: %s" % format)
def get_objects():
# Collate the objects to be serialized.
for model in sort_dependencies(app_list.items()):
if model in excluded_models:
continue
if not model._meta.proxy and router.allow_syncdb(using, model):
if use_base_manager:
objects = model._base_manager
else:
objects = model._default_manager
for obj in objects.using(using).\
order_by(model._meta.pk.name).iterator():
yield obj
try:
self.stdout.ending = None
serializers.serialize(format, get_objects(), indent=indent,
use_natural_keys=use_natural_keys, stream=self.stdout)
except Exception as e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
def sort_dependencies(app_list):
"""Sort a list of app,modellist pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
    dependency has its dependencies serialized first.
"""
from django.db.models import get_model, get_models
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app, model_list in app_list:
if model_list is None:
model_list = get_models(app)
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [get_model(*d.split('.')) for d in deps]
else:
deps = []
# Now add a dependency for any FK or M2M relation with
# a model that defines a natural key
for field in model._meta.fields:
if hasattr(field.rel, 'to'):
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
for field in model._meta.many_to_many:
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
            # then we've found another model with all its dependencies satisfied.
found = True
for candidate in ((d not in models or d in model_list) for d in deps):
if not candidate:
found = False
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise CommandError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
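# --- Illustrative sketch (not part of the original command) -----------------
# A minimal, hypothetical demo of the same "promote until stable" loop that
# sort_dependencies() uses above, with plain strings standing in for model
# classes. The names _toy_sort and toy_deps are invented for illustration, and
# the guarded block only runs if this module's own imports succeed.
if __name__ == '__main__':
    def _toy_sort(dependencies):
        # dependencies: list of (name, [names it depends on]) pairs
        pending = list(reversed(dependencies))
        ordered = []
        while pending:
            skipped = []
            changed = False
            while pending:
                name, deps = pending.pop()
                if all(d in ordered for d in deps):
                    ordered.append(name)
                    changed = True
                else:
                    skipped.append((name, deps))
            if not changed:
                raise RuntimeError('Circular dependencies: %r' % skipped)
            pending = skipped
        return ordered
    toy_deps = [('comment', ['user', 'article']),
                ('article', ['user']),
                ('user', [])]
    print(_toy_sort(toy_deps))  # ['user', 'article', 'comment']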
| agpl-3.0 |
ayushagrawal288/zamboni | mkt/account/urls.py | 10 | 1492 | from django.conf.urls import include, patterns, url
from mkt.account.views import (AccountView, FeedbackView, FxALoginView,
InstalledViewSet, LoginView, LogoutView,
NewsletterView, PermissionsView)
from mkt.feed.views import FeedShelfViewSet
from mkt.users import views
drf_patterns = patterns(
'',
url('^feedback/$', FeedbackView.as_view(), name='account-feedback'),
url('^installed/mine/$',
InstalledViewSet.as_view({'get': 'list'}), name='installed-apps'),
url('^installed/mine/remove_app/$',
InstalledViewSet.as_view({'post': 'remove_app'}),
name='installed-apps-remove'),
# Native FxA login view.
url('^login/$', LoginView.as_view(), name='account-login'),
# Oauth FxA login view.
url('^fxa-login/$', FxALoginView.as_view(), name='fxa-account-login'),
url('^logout/$', LogoutView.as_view(), name='account-logout'),
url('^newsletter/$', NewsletterView.as_view(), name='account-newsletter'),
url('^permissions/(?P<pk>[^/]+)/$', PermissionsView.as_view(),
name='account-permissions'),
url('^settings/(?P<pk>[^/]+)/$', AccountView.as_view(),
name='account-settings'),
url(r'^shelves/$', FeedShelfViewSet.as_view(
{'get': 'mine'}), name='feedshelves-mine'),
)
api_patterns = patterns(
'',
url('^account/', include(drf_patterns)),
)
user_patterns = patterns(
'',
url('^ajax$', views.ajax, name='users.ajax'),
)
| bsd-3-clause |
BoltzmannBrain/nupic | tests/unit/nupic/algorithms/sp_overlap_test.py | 34 | 7020 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This is a legacy test from trunk and may replicate spatial pooler tests.
The allocation of cells to new patterns is explored. After all the cells
have been allocated, cells must be reused. This test makes sure that the
allocation of new cells is such that we achieve maximum generality and
predictive power.
Note: Since the sp pooler has 2048 cells with a sparsity of 40 cells active
per iteration, 100% allocation is reached at the 51st unique pattern.
"""
import unittest2 as unittest
import random as rnd
import time
import numpy
from nupic.bindings.math import GetNTAReal
from nupic.encoders import scalar
from nupic.bindings.algorithms import SpatialPooler
realDType = GetNTAReal()
SEED = 42
class TestSPFrequency(unittest.TestCase):
def testCategory(self):
"""Test that the most frequent possible option is chosen for a scalar
encoded field """
self.frequency(n=100, w=21, seed=SEED, numColors=90, encoder = 'scalar')
def testScalar(self):
"""Test that the most frequent possible option is chosen for a category
encoded field """
self.frequency(n=30, w=21, seed=SEED, numColors=90, encoder = 'category')
@unittest.skip("Not working...")
def testScalarLong(self):
"""Test that the most frequent possible option is chosen for a scalar
encoded field. Run through many different numbers of patterns and random
seeds"""
for n in [52, 70, 80, 90, 100, 110]:
self.frequency(n=100, w=21, seed=SEED, numColors=n, encoder='scalar')
@unittest.skip("Not working...")
def testCategoryLong(self):
"""Test that the most frequent possible option is chosen for a category
encoded field. Run through many different numbers of patterns and random
seeds"""
for n in [52, 70, 80, 90, 100, 110]:
self.frequency(n=100, w=21, seed=SEED, numColors=n)
def frequency(self,
n=15,
w=7,
columnDimensions = 2048,
numActiveColumnsPerInhArea = 40,
stimulusThreshold = 0,
spSeed = 1,
spVerbosity = 0,
numColors = 2,
seed=42,
minVal=0,
maxVal=10,
encoder = 'category',
forced=True):
""" Helper function that tests whether the SP predicts the most
frequent record """
print "\nRunning SP overlap test..."
print encoder, 'encoder,', 'Random seed:', seed, 'and', numColors, 'colors'
#Setting up SP and creating training patterns
# Instantiate Spatial Pooler
spImpl = SpatialPooler(
columnDimensions=(columnDimensions, 1),
inputDimensions=(1, n),
potentialRadius=n/2,
numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
spVerbosity=spVerbosity,
stimulusThreshold=stimulusThreshold,
potentialPct=0.5,
seed=spSeed,
)
rnd.seed(seed)
numpy.random.seed(seed)
colors = []
coincs = []
reUsedCoincs = []
spOutput = []
patterns = set([])
# Setting up the encodings
if encoder=='scalar':
enc = scalar.ScalarEncoder(name='car', w=w, n=n, minval=minVal,
                                 maxval=maxVal, periodic=False, forced=True) # forced: it's strongly recommended to use w>=21; in the example we force-skip the check for readability
for y in xrange(numColors):
temp = enc.encode(rnd.random()*maxVal)
colors.append(numpy.array(temp, dtype=realDType))
else:
for y in xrange(numColors):
sdr = numpy.zeros(n, dtype=realDType)
# Randomly setting w out of n bits to 1
sdr[rnd.sample(xrange(n), w)] = 1
colors.append(sdr)
# Training the sp
print 'Starting to train the sp on', numColors, 'patterns'
startTime = time.time()
for i in xrange(numColors):
# TODO: See https://github.com/numenta/nupic/issues/2072
spInput = colors[i]
onCells = numpy.zeros(columnDimensions)
spImpl.compute(spInput, True, onCells)
spOutput.append(onCells.tolist())
activeCoincIndices = set(onCells.nonzero()[0])
# Checking if any of the active cells have been previously active
reUsed = activeCoincIndices.intersection(patterns)
if len(reUsed) == 0:
# The set of all coincidences that have won at least once
coincs.append((i, activeCoincIndices, colors[i]))
else:
reUsedCoincs.append((i, activeCoincIndices, colors[i]))
# Adding the active cells to the set of coincs that have been active at
# least once
patterns.update(activeCoincIndices)
if (i + 1) % 100 == 0:
print 'Record number:', i + 1
print "Elapsed time: %.2f seconds" % (time.time() - startTime)
print len(reUsedCoincs), "re-used coinc(s),"
# Check if results match expectations
summ = []
for z in coincs:
summ.append(sum([len(z[1].intersection(y[1])) for y in reUsedCoincs]))
zeros = len([x for x in summ if x==0])
factor = max(summ)*len(summ)/sum(summ)
if len(reUsed) < 10:
self.assertLess(factor, 41,
"\nComputed factor: %d\nExpected Less than %d" % (
factor, 41))
self.assertLess(zeros, 0.99*len(summ),
"\nComputed zeros: %d\nExpected Less than %d" % (
zeros, 0.99*len(summ)))
else:
self.assertLess(factor, 8,
"\nComputed factor: %d\nExpected Less than %d" % (
factor, 8))
self.assertLess(zeros, 12,
"\nComputed zeros: %d\nExpected Less than %d" % (
zeros, 12))
def hammingDistance(s1, s2):
assert len(s1) == len(s2)
return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
azureplus/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/yaml/nodes.py | 985 | 1440 |
class Node(object):
def __init__(self, tag, value, start_mark, end_mark):
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
value = self.value
#if isinstance(value, list):
# if len(value) == 0:
# value = '<empty>'
# elif len(value) == 1:
# value = '<1 item>'
# else:
# value = '<%d items>' % len(value)
#else:
# if len(value) > 75:
# value = repr(value[:70]+u' ... ')
# else:
# value = repr(value)
value = repr(value)
return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
class ScalarNode(Node):
id = 'scalar'
def __init__(self, tag, value,
start_mark=None, end_mark=None, style=None):
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.style = style
class CollectionNode(Node):
def __init__(self, tag, value,
start_mark=None, end_mark=None, flow_style=None):
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.flow_style = flow_style
class SequenceNode(CollectionNode):
id = 'sequence'
class MappingNode(CollectionNode):
id = 'mapping'
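# --- Illustrative usage (not part of the original module) -------------------
# Nodes are normally produced by the YAML composer; this guarded snippet only
# shows how the classes above nest and what __repr__ prints.
if __name__ == '__main__':
    item = ScalarNode(tag='tag:yaml.org,2002:str', value='hello')
    seq = SequenceNode(tag='tag:yaml.org,2002:seq', value=[item])
    print(item)   # ScalarNode(tag='tag:yaml.org,2002:str', value='hello')
    print(seq)    # SequenceNode(tag=..., value=[ScalarNode(...)])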
| apache-2.0 |
blokhin/three.js | utils/exporters/blender/modules/msgpack/__init__.py | 659 | 1385 | # coding: utf-8
from msgpack._version import version
from msgpack.exceptions import *
from collections import namedtuple
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
import os
if os.environ.get('MSGPACK_PUREPYTHON'):
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
else:
try:
from msgpack._packer import Packer
from msgpack._unpacker import unpack, unpackb, Unpacker
except ImportError:
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
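# --- Usage sketch (illustrative, not part of the original module) -----------
# A small round-trip demo for the helpers defined above. It assumes either the
# C extension or the pure-Python fallback of this msgpack package is importable
# in your environment; exact repr/encoding details may differ between the two.
if __name__ == '__main__':
    import io
    data = packb([1, 2, 3])
    print(repr(data))              # the packed bytes
    print(unpackb(data))           # -> [1, 2, 3]
    ext = ExtType(42, b'\x00\x01')
    print(unpackb(packb(ext)))     # round-trips as an ExtType(code=42, ...)
    stream = io.BytesIO()
    pack({'answer': 42}, stream)   # stream-based API
    stream.seek(0)
    print(unpack(stream))          # -> {'answer': 42} (keys may come back as
                                   #    bytes depending on unpacker options)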
| mit |
ROGUE-JCTD/vida | vida/firestation/templatetags/vida.py | 3 | 3225 | from django import template
from django.conf import settings
from django.template import defaultfilters
from django.utils.translation import pgettext, ugettext as _, ungettext
register = template.Library()
# A tuple of standard large number to their converters
intword_converters = (
(3, lambda number: (
        ungettext('%(value).1fk', '%(value).1fk', number),
ungettext('%(value)sk', '%(value)sk', number),
)),
(6, lambda number: (
ungettext('%(value).1fm', '%(value).1fm', number),
ungettext('%(value)sm', '%(value)sm', number),
)),
(9, lambda number: (
ungettext('%(value).1fb', '%(value).1fb', number),
ungettext('%(value)sb', '%(value)sb', number),
)),
(12, lambda number: (
ungettext('%(value).1ft', '%(value).1ft', number),
ungettext('%(value)s trillion', '%(value)s trillion', number),
)),
(15, lambda number: (
ungettext('%(value).1fq', '%(value).1fq', number),
ungettext('%(value)s quadrillion', '%(value)s quadrillion', number),
)),
)
@register.filter(is_safe=False)
def abbreviatedintword(value):
"""
Converts a large integer to a friendly text representation. Works best
    for numbers over 1 million. For example, 1000000 becomes '1m',
    1200000 becomes '1.2m' and 1200000000 becomes '1.2b'.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
def _check_for_i18n(value, float_formatted, string_formatted):
"""
Use the i18n enabled defaultfilters.floatformat if possible
"""
if settings.USE_L10N:
value = defaultfilters.floatformat(value, 1)
template = string_formatted
else:
template = float_formatted
template = template % {'value': value}
return template.replace('.0', '')
for exponent, converters in intword_converters:
large_number = 10 ** exponent
if value < large_number * 1000:
new_value = value / float(large_number)
return _check_for_i18n(new_value, *converters(new_value))
return value
@register.simple_tag
def url_replace(request, field, value):
"""
Replaces or creates a GET parameter in a URL.
"""
dict_ = request.GET.copy()
dict_[field] = value
return dict_.urlencode()
@register.filter(is_safe=False)
def risk_level(value):
"""
Returns a string based risk level from a number.
1: Low
2: Medium
3: Medium
4: High
"""
if value == 1:
return 'low'
if value == 2 or value == 3:
return 'medium'
if value == 4:
return 'high'
@register.filter(is_safe=False)
def grade(value):
"""
Returns a string based grade from a number.
    1: Good
2: Fair
3: Fair
4: Poor
"""
if value == 1:
return 'good'
if value == 2 or value == 3:
return 'fair'
if value == 4:
return 'poor'
@register.filter(is_safe=False)
def quartile_text(value):
"""
    Returns a text label ('lowest' ... 'highest') for a quartile number from 1 to 4.
"""
return dict(zip(range(1, 5), ['lowest', 'second lowest', 'second highest', 'highest'])).get(value)
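# --- Illustrative checks (not part of the original module) ------------------
# risk_level, grade and quartile_text are plain functions, so they can be
# exercised directly if Django itself is importable; abbreviatedintword is
# left out here because it reads settings.USE_L10N and would need
# settings.configure() first.
if __name__ == '__main__':
    print(risk_level(2))      # 'medium'
    print(grade(4))           # 'poor'
    print(quartile_text(1))   # 'lowest'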
| mit |
olivertso/moneify | source/apps/dashboard/forms.py | 1 | 4575 | import calendar
from django import forms
from django.utils import timezone
from source.apps.dashboard.helpers import ItemHelper
from source.apps.dashboard.models import Item, Scenario, Tag
class BaseDatePickerInput(forms.DateInput):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.attrs = {"class": "datepicker", "autocomplete": "off"}
class MonthPickerInput(BaseDatePickerInput):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.attrs["class"] += " datepicker-m"
self.format = "%Y-%m"
class DatePickerInput(BaseDatePickerInput):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.attrs["class"] += " datepicker-d"
class DashboardFilter(forms.Form):
date__gte = forms.DateField(widget=MonthPickerInput(), input_formats=["%Y-%m"])
date__lte = forms.DateField(widget=MonthPickerInput(), input_formats=["%Y-%m"])
type = forms.ChoiceField(choices=(("", "All Types"),) + Item.TYPE_CHOICES, required=False)
tag = forms.ModelChoiceField(queryset=Tag.objects, empty_label="All Tags", required=False)
def __init__(self, *args, **kwargs):
self.scenario = kwargs.pop("scenario", None)
self.tags = Tag.objects.filter(scenario=self.scenario)
# If date__gte and date__lte are not provided, use current month as the
# date range.
if not kwargs["data"]:
today = timezone.localtime(timezone.now()).date()
kwargs.update(data={"date__gte": today, "date__lte": today})
super().__init__(*args, **kwargs)
self.fields["tag"].queryset = self.tags
def clean_date__gte(self):
date = self.cleaned_data.get("date__gte").replace(day=1)
return date
def clean_date__lte(self):
date = self.cleaned_data.get("date__lte")
date = date.replace(day=calendar.monthrange(date.year, date.month)[1])
return date
def filter_items(self):
"""
        Return items filtered by the form's scenario and query string parameters. Form
should be valid when calling this method.
"""
params = {k: v for k, v in self.cleaned_data.items() if v}
return Item.objects.filter(scenario=self.scenario, **params)
def filter_context_data(self):
"""
        Called when the request is not ajax. Return the form's scenario and tags if
form is invalid, otherwise also return filtered items and report.
"""
data = {"scenario": self.scenario, "tags": self.tags}
if self.is_valid():
items = self.filter_items()
data.update(items=items, report=ItemHelper.get_report(items))
return data
def filter_charts_data(self):
"""
Called when the request is ajax. Return the data of monthly reports and
tag expenses distribution charts. Form should be valid when calling this
method.
"""
items = self.filter_items()
return {
"monthly_reports": ItemHelper.get_monthly_reports(items),
"tag_exp_dist": ItemHelper.get_tag_exp_dist(items),
}
class ScenarioForm(forms.ModelForm):
class Meta:
model = Scenario
fields = ["name"]
def full_clean(self):
"""
Field user is not included in form fields, need to validate
unique_together constraint manually.
"""
super().full_clean()
try:
self.instance.validate_unique()
except forms.ValidationError:
error_message = "A scenario with this name already exists."
self._update_errors(forms.ValidationError(error_message))
class TagForm(forms.ModelForm):
class Meta:
model = Tag
fields = ["name"]
def full_clean(self):
"""
Field scenario is not included in form fields, need to validate
unique_together constraint manually.
"""
super().full_clean()
try:
self.instance.validate_unique()
except forms.ValidationError:
error_message = "A tag with the same name already exists in this scenario."
self._update_errors(forms.ValidationError(error_message))
class ItemForm(forms.ModelForm):
date = forms.DateField(widget=DatePickerInput())
class Meta:
model = Item
fields = ["type", "date", "description", "amount", "tag"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["tag"].queryset = Tag.objects.filter(scenario=self.instance.scenario)
| gpl-3.0 |
EdgarSun/Django-Demo | django/contrib/gis/geos/linestring.py | 411 | 5568 | from django.contrib.gis.geos.base import numpy
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos import prototypes as capi
class LineString(GEOSGeometry):
_init_func = capi.create_linestring
_minlength = 2
#### Python 'magic' routines ####
def __init__(self, *args, **kwargs):
"""
Initializes on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
"""
# If only one argument provided, set the coords array appropriately
if len(args) == 1: coords = args[0]
else: coords = args
if isinstance(coords, (tuple, list)):
# Getting the number of coords and the number of dimensions -- which
# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
ncoords = len(coords)
if coords: ndim = len(coords[0])
else: raise TypeError('Cannot initialize on empty sequence.')
self._checkdim(ndim)
# Incrementing through each of the coordinates and verifying
for i in xrange(1, ncoords):
if not isinstance(coords[i], (tuple, list, Point)):
raise TypeError('each coordinate should be a sequence (list or tuple)')
if len(coords[i]) != ndim: raise TypeError('Dimension mismatch.')
numpy_coords = False
elif numpy and isinstance(coords, numpy.ndarray):
shape = coords.shape # Using numpy's shape.
if len(shape) != 2: raise TypeError('Too many dimensions.')
self._checkdim(shape[1])
ncoords = shape[0]
ndim = shape[1]
numpy_coords = True
else:
raise TypeError('Invalid initialization input for LineStrings.')
# Creating a coordinate sequence object because it is easier to
# set the points using GEOSCoordSeq.__setitem__().
cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim==3))
for i in xrange(ncoords):
if numpy_coords: cs[i] = coords[i,:]
elif isinstance(coords[i], Point): cs[i] = coords[i].tuple
else: cs[i] = coords[i]
# If SRID was passed in with the keyword arguments
srid = kwargs.get('srid', None)
# Calling the base geometry initialization with the returned pointer
# from the function.
super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)
def __iter__(self):
"Allows iteration over this LineString."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of points in this LineString."
return len(self._cs)
def _get_single_external(self, index):
return self._cs[index]
_get_single_internal = _get_single_external
def _set_list(self, length, items):
ndim = self._cs.dims #
hasz = self._cs.hasz # I don't understand why these are different
# create a new coordinate sequence and populate accordingly
cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
for i, c in enumerate(items):
cs[i] = c
ptr = self._init_func(cs.ptr)
if ptr:
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(self.srid)
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._checkindex(index)
self._cs[index] = value
def _checkdim(self, dim):
if dim not in (2, 3): raise TypeError('Dimension mismatch.')
#### Sequence Properties ####
@property
def tuple(self):
"Returns a tuple version of the geometry from the coordinate sequence."
return self._cs.tuple
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function. Will return a numpy array if possible.
"""
lst = [func(i) for i in xrange(len(self))]
if numpy: return numpy.array(lst) # ARRRR!
else: return lst
@property
def array(self):
"Returns a numpy array for the LineString."
return self._listarr(self._cs.__getitem__)
@property
def merged(self):
"Returns the line merge of this LineString."
return self._topology(capi.geos_linemerge(self.ptr))
@property
def x(self):
"Returns a list or numpy array of the X variable."
return self._listarr(self._cs.getX)
@property
def y(self):
"Returns a list or numpy array of the Y variable."
return self._listarr(self._cs.getY)
@property
def z(self):
"Returns a list or numpy array of the Z variable."
if not self.hasz: return None
else: return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
_minLength = 4
_init_func = capi.create_linearring
| mit |
anbangleo/NlsdeWeb | Python-3.6.0/Lib/test/test_ordered_dict.py | 2 | 28115 | import builtins
import contextlib
import copy
import gc
import pickle
from random import randrange, shuffle
import struct
import sys
import unittest
import weakref
from collections.abc import MutableMapping
from test import mapping_tests, support
py_coll = support.import_fresh_module('collections', blocked=['_collections'])
c_coll = support.import_fresh_module('collections', fresh=['_collections'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
class OrderedDictTests:
def test_init(self):
OrderedDict = self.OrderedDict
with self.assertRaises(TypeError):
OrderedDict([('a', 1), ('b', 2)], None) # too many args
pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs) # dict input
self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs) # kwds input
self.assertEqual(list(OrderedDict(pairs).items()), pairs) # pairs input
self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
c=3, e=5).items()), pairs) # mixed input
        # make sure no positional args conflict with possible kwargs
self.assertEqual(list(OrderedDict(self=42).items()), [('self', 42)])
self.assertEqual(list(OrderedDict(other=42).items()), [('other', 42)])
self.assertRaises(TypeError, OrderedDict, 42)
self.assertRaises(TypeError, OrderedDict, (), ())
self.assertRaises(TypeError, OrderedDict.__init__)
# Make sure that direct calls to __init__ do not clear previous contents
d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
d.__init__([('e', 5), ('f', 6)], g=7, d=4)
self.assertEqual(list(d.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
def test_update(self):
OrderedDict = self.OrderedDict
with self.assertRaises(TypeError):
OrderedDict().update([('a', 1), ('b', 2)], None) # too many args
pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
od = OrderedDict()
od.update(dict(pairs))
self.assertEqual(sorted(od.items()), pairs) # dict input
od = OrderedDict()
od.update(**dict(pairs))
self.assertEqual(sorted(od.items()), pairs) # kwds input
od = OrderedDict()
od.update(pairs)
self.assertEqual(list(od.items()), pairs) # pairs input
od = OrderedDict()
od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
self.assertEqual(list(od.items()), pairs) # mixed input
# Issue 9137: Named argument called 'other' or 'self'
# shouldn't be treated specially.
od = OrderedDict()
od.update(self=23)
self.assertEqual(list(od.items()), [('self', 23)])
od = OrderedDict()
od.update(other={})
self.assertEqual(list(od.items()), [('other', {})])
od = OrderedDict()
od.update(red=5, blue=6, other=7, self=8)
self.assertEqual(sorted(list(od.items())),
[('blue', 6), ('other', 7), ('red', 5), ('self', 8)])
# Make sure that direct calls to update do not clear previous contents
        # and that updated items are not moved to the end
d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
d.update([('e', 5), ('f', 6)], g=7, d=4)
self.assertEqual(list(d.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
self.assertRaises(TypeError, OrderedDict().update, 42)
self.assertRaises(TypeError, OrderedDict().update, (), ())
self.assertRaises(TypeError, OrderedDict.update)
self.assertRaises(TypeError, OrderedDict().update, 42)
self.assertRaises(TypeError, OrderedDict().update, (), ())
self.assertRaises(TypeError, OrderedDict.update)
def test_init_calls(self):
calls = []
class Spam:
def keys(self):
calls.append('keys')
return ()
def items(self):
calls.append('items')
return ()
self.OrderedDict(Spam())
self.assertEqual(calls, ['keys'])
def test_fromkeys(self):
OrderedDict = self.OrderedDict
od = OrderedDict.fromkeys('abc')
self.assertEqual(list(od.items()), [(c, None) for c in 'abc'])
od = OrderedDict.fromkeys('abc', value=None)
self.assertEqual(list(od.items()), [(c, None) for c in 'abc'])
od = OrderedDict.fromkeys('abc', value=0)
self.assertEqual(list(od.items()), [(c, 0) for c in 'abc'])
def test_abc(self):
OrderedDict = self.OrderedDict
self.assertIsInstance(OrderedDict(), MutableMapping)
self.assertTrue(issubclass(OrderedDict, MutableMapping))
def test_clear(self):
OrderedDict = self.OrderedDict
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
self.assertEqual(len(od), len(pairs))
od.clear()
self.assertEqual(len(od), 0)
def test_delitem(self):
OrderedDict = self.OrderedDict
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
del od['a']
self.assertNotIn('a', od)
with self.assertRaises(KeyError):
del od['a']
self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
def test_setitem(self):
OrderedDict = self.OrderedDict
od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
od['c'] = 10 # existing element
od['f'] = 20 # new element
self.assertEqual(list(od.items()),
[('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)])
def test_iterators(self):
OrderedDict = self.OrderedDict
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
self.assertEqual(list(od), [t[0] for t in pairs])
self.assertEqual(list(od.keys()), [t[0] for t in pairs])
self.assertEqual(list(od.values()), [t[1] for t in pairs])
self.assertEqual(list(od.items()), pairs)
self.assertEqual(list(reversed(od)),
[t[0] for t in reversed(pairs)])
self.assertEqual(list(reversed(od.keys())),
[t[0] for t in reversed(pairs)])
self.assertEqual(list(reversed(od.values())),
[t[1] for t in reversed(pairs)])
self.assertEqual(list(reversed(od.items())), list(reversed(pairs)))
def test_detect_deletion_during_iteration(self):
OrderedDict = self.OrderedDict
od = OrderedDict.fromkeys('abc')
it = iter(od)
key = next(it)
del od[key]
with self.assertRaises(Exception):
# Note, the exact exception raised is not guaranteed
            # The only guarantee is that the next() call will not succeed
next(it)
def test_sorted_iterators(self):
OrderedDict = self.OrderedDict
with self.assertRaises(TypeError):
OrderedDict([('a', 1), ('b', 2)], None)
pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
od = OrderedDict(pairs)
self.assertEqual(sorted(od), [t[0] for t in pairs])
self.assertEqual(sorted(od.keys()), [t[0] for t in pairs])
self.assertEqual(sorted(od.values()), [t[1] for t in pairs])
self.assertEqual(sorted(od.items()), pairs)
self.assertEqual(sorted(reversed(od)),
sorted([t[0] for t in reversed(pairs)]))
def test_iterators_empty(self):
OrderedDict = self.OrderedDict
od = OrderedDict()
empty = []
self.assertEqual(list(od), empty)
self.assertEqual(list(od.keys()), empty)
self.assertEqual(list(od.values()), empty)
self.assertEqual(list(od.items()), empty)
self.assertEqual(list(reversed(od)), empty)
self.assertEqual(list(reversed(od.keys())), empty)
self.assertEqual(list(reversed(od.values())), empty)
self.assertEqual(list(reversed(od.items())), empty)
def test_popitem(self):
OrderedDict = self.OrderedDict
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
while pairs:
self.assertEqual(od.popitem(), pairs.pop())
with self.assertRaises(KeyError):
od.popitem()
self.assertEqual(len(od), 0)
def test_popitem_last(self):
OrderedDict = self.OrderedDict
pairs = [(i, i) for i in range(30)]
obj = OrderedDict(pairs)
for i in range(8):
obj.popitem(True)
obj.popitem(True)
obj.popitem(last=True)
self.assertEqual(len(obj), 20)
def test_pop(self):
OrderedDict = self.OrderedDict
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
shuffle(pairs)
while pairs:
k, v = pairs.pop()
self.assertEqual(od.pop(k), v)
with self.assertRaises(KeyError):
od.pop('xyz')
self.assertEqual(len(od), 0)
self.assertEqual(od.pop(k, 12345), 12345)
# make sure pop still works when __missing__ is defined
class Missing(OrderedDict):
def __missing__(self, key):
return 0
m = Missing(a=1)
self.assertEqual(m.pop('b', 5), 5)
self.assertEqual(m.pop('a', 6), 1)
self.assertEqual(m.pop('a', 6), 6)
self.assertEqual(m.pop('a', default=6), 6)
with self.assertRaises(KeyError):
m.pop('a')
def test_equality(self):
OrderedDict = self.OrderedDict
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od1 = OrderedDict(pairs)
od2 = OrderedDict(pairs)
self.assertEqual(od1, od2) # same order implies equality
pairs = pairs[2:] + pairs[:2]
od2 = OrderedDict(pairs)
self.assertNotEqual(od1, od2) # different order implies inequality
# comparison to regular dict is not order sensitive
self.assertEqual(od1, dict(od2))
self.assertEqual(dict(od2), od1)
        # different length implies inequality
self.assertNotEqual(od1, OrderedDict(pairs[:-1]))
def test_copying(self):
OrderedDict = self.OrderedDict
# Check that ordered dicts are copyable, deepcopyable, picklable,
# and have a repr/eval round-trip
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
def check(dup):
msg = "\ncopy: %s\nod: %s" % (dup, od)
self.assertIsNot(dup, od, msg)
self.assertEqual(dup, od)
self.assertEqual(list(dup.items()), list(od.items()))
self.assertEqual(len(dup), len(od))
self.assertEqual(type(dup), type(od))
check(od.copy())
check(copy.copy(od))
check(copy.deepcopy(od))
# pickle directly pulls the module, so we have to fake it
with replaced_module('collections', self.module):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
check(pickle.loads(pickle.dumps(od, proto)))
check(eval(repr(od)))
update_test = OrderedDict()
update_test.update(od)
check(update_test)
check(OrderedDict(od))
def test_yaml_linkage(self):
OrderedDict = self.OrderedDict
# Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature.
# In yaml, lists are native but tuples are not.
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
# yaml.dump(od) -->
# '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n'
self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1]))
def test_reduce_not_too_fat(self):
OrderedDict = self.OrderedDict
# do not save instance dictionary if not needed
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
self.assertIsInstance(od.__dict__, dict)
self.assertIsNone(od.__reduce__()[2])
od.x = 10
self.assertEqual(od.__dict__['x'], 10)
self.assertEqual(od.__reduce__()[2], {'x': 10})
def test_pickle_recursive(self):
OrderedDict = self.OrderedDict
od = OrderedDict()
od[1] = od
# pickle directly pulls the module, so we have to fake it
with replaced_module('collections', self.module):
for proto in range(-1, pickle.HIGHEST_PROTOCOL + 1):
dup = pickle.loads(pickle.dumps(od, proto))
self.assertIsNot(dup, od)
self.assertEqual(list(dup.keys()), [1])
self.assertIs(dup[1], dup)
def test_repr(self):
OrderedDict = self.OrderedDict
od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
self.assertEqual(repr(od),
"OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
self.assertEqual(eval(repr(od)), od)
self.assertEqual(repr(OrderedDict()), "OrderedDict()")
def test_repr_recursive(self):
OrderedDict = self.OrderedDict
# See issue #9826
od = OrderedDict.fromkeys('abc')
od['x'] = od
self.assertEqual(repr(od),
"OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])")
def test_setdefault(self):
OrderedDict = self.OrderedDict
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
pair_order = list(od.items())
self.assertEqual(od.setdefault('a', 10), 3)
# make sure order didn't change
self.assertEqual(list(od.items()), pair_order)
self.assertEqual(od.setdefault('x', 10), 10)
# make sure 'x' is added to the end
self.assertEqual(list(od.items())[-1], ('x', 10))
self.assertEqual(od.setdefault('g', default=9), 9)
# make sure setdefault still works when __missing__ is defined
class Missing(OrderedDict):
def __missing__(self, key):
return 0
self.assertEqual(Missing().setdefault(5, 9), 9)
def test_reinsert(self):
OrderedDict = self.OrderedDict
# Given insert a, insert b, delete a, re-insert a,
# verify that a is now later than b.
od = OrderedDict()
od['a'] = 1
od['b'] = 2
del od['a']
self.assertEqual(list(od.items()), [('b', 2)])
od['a'] = 1
self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])
def test_move_to_end(self):
OrderedDict = self.OrderedDict
od = OrderedDict.fromkeys('abcde')
self.assertEqual(list(od), list('abcde'))
od.move_to_end('c')
self.assertEqual(list(od), list('abdec'))
od.move_to_end('c', 0)
self.assertEqual(list(od), list('cabde'))
od.move_to_end('c', 0)
self.assertEqual(list(od), list('cabde'))
od.move_to_end('e')
self.assertEqual(list(od), list('cabde'))
od.move_to_end('b', last=False)
self.assertEqual(list(od), list('bcade'))
with self.assertRaises(KeyError):
od.move_to_end('x')
with self.assertRaises(KeyError):
od.move_to_end('x', 0)
def test_move_to_end_issue25406(self):
OrderedDict = self.OrderedDict
od = OrderedDict.fromkeys('abc')
od.move_to_end('c', last=False)
self.assertEqual(list(od), list('cab'))
od.move_to_end('a', last=False)
self.assertEqual(list(od), list('acb'))
od = OrderedDict.fromkeys('abc')
od.move_to_end('a')
self.assertEqual(list(od), list('bca'))
od.move_to_end('c')
self.assertEqual(list(od), list('bac'))
def test_sizeof(self):
OrderedDict = self.OrderedDict
# Wimpy test: Just verify the reported size is larger than a regular dict
d = dict(a=1)
od = OrderedDict(**d)
self.assertGreater(sys.getsizeof(od), sys.getsizeof(d))
def test_views(self):
OrderedDict = self.OrderedDict
# See http://bugs.python.org/issue24286
s = 'the quick brown fox jumped over a lazy dog yesterday before dawn'.split()
od = OrderedDict.fromkeys(s)
self.assertEqual(od.keys(), dict(od).keys())
self.assertEqual(od.items(), dict(od).items())
def test_override_update(self):
OrderedDict = self.OrderedDict
# Verify that subclasses can override update() without breaking __init__()
class MyOD(OrderedDict):
def update(self, *args, **kwds):
raise Exception()
items = [('a', 1), ('c', 3), ('b', 2)]
self.assertEqual(list(MyOD(items).items()), items)
def test_highly_nested(self):
# Issue 25395: crashes during garbage collection
OrderedDict = self.OrderedDict
obj = None
for _ in range(1000):
obj = OrderedDict([(None, obj)])
del obj
support.gc_collect()
def test_highly_nested_subclass(self):
# Issue 25395: crashes during garbage collection
OrderedDict = self.OrderedDict
deleted = []
class MyOD(OrderedDict):
def __del__(self):
deleted.append(self.i)
obj = None
for i in range(100):
obj = MyOD([(None, obj)])
obj.i = i
del obj
support.gc_collect()
self.assertEqual(deleted, list(reversed(range(100))))
def test_delitem_hash_collision(self):
OrderedDict = self.OrderedDict
class Key:
def __init__(self, hash):
self._hash = hash
self.value = str(id(self))
def __hash__(self):
return self._hash
def __eq__(self, other):
try:
return self.value == other.value
except AttributeError:
return False
def __repr__(self):
return self.value
def blocking_hash(hash):
# See the collision-handling in lookdict (in Objects/dictobject.c).
MINSIZE = 8
i = (hash & MINSIZE-1)
return (i << 2) + i + hash + 1
COLLIDING = 1
key = Key(COLLIDING)
colliding = Key(COLLIDING)
blocking = Key(blocking_hash(COLLIDING))
od = OrderedDict()
od[key] = ...
od[blocking] = ...
od[colliding] = ...
od['after'] = ...
del od[blocking]
del od[colliding]
self.assertEqual(list(od.items()), [(key, ...), ('after', ...)])
def test_issue24347(self):
OrderedDict = self.OrderedDict
class Key:
def __hash__(self):
return randrange(100000)
od = OrderedDict()
for i in range(100):
key = Key()
od[key] = i
# These should not crash.
with self.assertRaises(KeyError):
list(od.values())
with self.assertRaises(KeyError):
list(od.items())
with self.assertRaises(KeyError):
repr(od)
with self.assertRaises(KeyError):
od.copy()
def test_issue24348(self):
OrderedDict = self.OrderedDict
class Key:
def __hash__(self):
return 1
od = OrderedDict()
od[Key()] = 0
# This should not crash.
od.popitem()
def test_issue24667(self):
"""
dict resizes after a certain number of insertion operations,
whether or not there were deletions that freed up slots in the
hash table. During fast node lookup, OrderedDict must correctly
respond to all resizes, even if the current "size" is the same
as the old one. We verify that here by forcing a dict resize
on a sparse odict and then perform an operation that should
trigger an odict resize (e.g. popitem). One key aspect here is
that we will keep the size of the odict the same at each popitem
call. This verifies that we handled the dict resize properly.
"""
OrderedDict = self.OrderedDict
od = OrderedDict()
for c0 in '0123456789ABCDEF':
for c1 in '0123456789ABCDEF':
if len(od) == 4:
# This should not raise a KeyError.
od.popitem(last=False)
key = c0 + c1
od[key] = key
# Direct use of dict methods
def test_dict_setitem(self):
OrderedDict = self.OrderedDict
od = OrderedDict()
dict.__setitem__(od, 'spam', 1)
self.assertNotIn('NULL', repr(od))
def test_dict_delitem(self):
OrderedDict = self.OrderedDict
od = OrderedDict()
od['spam'] = 1
od['ham'] = 2
dict.__delitem__(od, 'spam')
with self.assertRaises(KeyError):
repr(od)
def test_dict_clear(self):
OrderedDict = self.OrderedDict
od = OrderedDict()
od['spam'] = 1
od['ham'] = 2
dict.clear(od)
self.assertNotIn('NULL', repr(od))
def test_dict_pop(self):
OrderedDict = self.OrderedDict
od = OrderedDict()
od['spam'] = 1
od['ham'] = 2
dict.pop(od, 'spam')
with self.assertRaises(KeyError):
repr(od)
def test_dict_popitem(self):
OrderedDict = self.OrderedDict
od = OrderedDict()
od['spam'] = 1
od['ham'] = 2
dict.popitem(od)
with self.assertRaises(KeyError):
repr(od)
def test_dict_setdefault(self):
OrderedDict = self.OrderedDict
od = OrderedDict()
dict.setdefault(od, 'spam', 1)
self.assertNotIn('NULL', repr(od))
def test_dict_update(self):
OrderedDict = self.OrderedDict
od = OrderedDict()
dict.update(od, [('spam', 1)])
self.assertNotIn('NULL', repr(od))
def test_reference_loop(self):
# Issue 25935
OrderedDict = self.OrderedDict
class A:
od = OrderedDict()
A.od[A] = None
r = weakref.ref(A)
del A
gc.collect()
self.assertIsNone(r())
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, self.OrderedDict)
support.check_free_after_iterating(self, lambda d: iter(d.keys()), self.OrderedDict)
support.check_free_after_iterating(self, lambda d: iter(d.values()), self.OrderedDict)
support.check_free_after_iterating(self, lambda d: iter(d.items()), self.OrderedDict)
class PurePythonOrderedDictTests(OrderedDictTests, unittest.TestCase):
module = py_coll
OrderedDict = py_coll.OrderedDict
class CPythonBuiltinDictTests(unittest.TestCase):
"""Builtin dict preserves insertion order.
Reuse some of tests in OrderedDict selectively.
"""
module = builtins
OrderedDict = dict
for method in (
"test_init test_update test_abc test_clear test_delitem " +
"test_setitem test_detect_deletion_during_iteration " +
"test_popitem test_reinsert test_override_update " +
"test_highly_nested test_highly_nested_subclass " +
"test_delitem_hash_collision ").split():
setattr(CPythonBuiltinDictTests, method, getattr(OrderedDictTests, method))
del method
@unittest.skipUnless(c_coll, 'requires the C version of the collections module')
class CPythonOrderedDictTests(OrderedDictTests, unittest.TestCase):
module = c_coll
OrderedDict = c_coll.OrderedDict
check_sizeof = support.check_sizeof
@support.cpython_only
def test_sizeof_exact(self):
OrderedDict = self.OrderedDict
calcsize = struct.calcsize
size = support.calcobjsize
check = self.check_sizeof
basicsize = size('nQ2P' + '3PnPn2P') + calcsize('2nP2n')
entrysize = calcsize('n2P')
p = calcsize('P')
nodesize = calcsize('Pn2P')
od = OrderedDict()
        check(od, basicsize + 8*p + 8 + 5*entrysize)  # 8-byte indices + 8*2//3 * entry table
od.x = 1
check(od, basicsize + 8*p + 8 + 5*entrysize)
od.update([(i, i) for i in range(3)])
check(od, basicsize + 8*p + 8 + 5*entrysize + 3*nodesize)
od.update([(i, i) for i in range(3, 10)])
check(od, basicsize + 16*p + 16 + 10*entrysize + 10*nodesize)
check(od.keys(), size('P'))
check(od.items(), size('P'))
check(od.values(), size('P'))
itersize = size('iP2n2P')
check(iter(od), itersize)
check(iter(od.keys()), itersize)
check(iter(od.items()), itersize)
check(iter(od.values()), itersize)
def test_key_change_during_iteration(self):
OrderedDict = self.OrderedDict
od = OrderedDict.fromkeys('abcde')
self.assertEqual(list(od), list('abcde'))
with self.assertRaises(RuntimeError):
for i, k in enumerate(od):
od.move_to_end(k)
self.assertLess(i, 5)
with self.assertRaises(RuntimeError):
for k in od:
od['f'] = None
with self.assertRaises(RuntimeError):
for k in od:
del od['c']
self.assertEqual(list(od), list('bdeaf'))
class PurePythonOrderedDictSubclassTests(PurePythonOrderedDictTests):
module = py_coll
class OrderedDict(py_coll.OrderedDict):
pass
class CPythonOrderedDictSubclassTests(CPythonOrderedDictTests):
module = c_coll
class OrderedDict(c_coll.OrderedDict):
pass
class PurePythonGeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
@classmethod
def setUpClass(cls):
cls.type2test = py_coll.OrderedDict
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
@unittest.skipUnless(c_coll, 'requires the C version of the collections module')
class CPythonGeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
@classmethod
def setUpClass(cls):
cls.type2test = c_coll.OrderedDict
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
class PurePythonSubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
@classmethod
def setUpClass(cls):
class MyOrderedDict(py_coll.OrderedDict):
pass
cls.type2test = MyOrderedDict
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
@unittest.skipUnless(c_coll, 'requires the C version of the collections module')
class CPythonSubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
@classmethod
def setUpClass(cls):
class MyOrderedDict(c_coll.OrderedDict):
pass
cls.type2test = MyOrderedDict
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
if __name__ == "__main__":
unittest.main()
| mit |
siggame/Joueur.py | games/stumped/game.py | 1 | 6599 | # Game: Gather branches and build up your lodge as beavers fight to survive.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from typing import Dict, List, Optional
from joueur.base_game import BaseGame
# import game objects
from games.stumped.beaver import Beaver
from games.stumped.game_object import GameObject
from games.stumped.job import Job
from games.stumped.player import Player
from games.stumped.spawner import Spawner
from games.stumped.tile import Tile
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Game(BaseGame):
"""The class representing the Game in the Stumped game.
Gather branches and build up your lodge as beavers fight to survive.
"""
def __init__(self):
"""Initializes a Game with basic logic as provided by the Creer code generator.
"""
BaseGame.__init__(self)
# private attributes to hold the properties so they appear read only
self._beavers = []
self._current_player = None
self._current_turn = 0
self._free_beavers_count = 0
self._game_objects = {}
self._jobs = []
self._lodge_cost_constant = 0
self._lodges_to_win = 0
self._map_height = 0
self._map_width = 0
self._max_turns = 100
self._players = []
self._session = ""
self._spawner = []
self._spawner_harvest_constant = 0
self._spawner_types = []
self._tiles = []
self._time_added_per_turn = 0
self.name = "Stumped"
self._game_object_classes = {
'Beaver': Beaver,
'GameObject': GameObject,
'Job': Job,
'Player': Player,
'Spawner': Spawner,
'Tile': Tile
}
@property
def beavers(self) -> List['games.stumped.beaver.Beaver']:
"""list[games.stumped.beaver.Beaver]: Every Beaver in the game.
"""
return self._beavers
@property
def current_player(self) -> 'games.stumped.player.Player':
"""games.stumped.player.Player: The player whose turn it is currently. That player can send commands. Other players cannot.
"""
return self._current_player
@property
def current_turn(self) -> int:
"""int: The current turn number, starting at 0 for the first player's turn.
"""
return self._current_turn
@property
def free_beavers_count(self) -> int:
"""int: When a Player has less Beavers than this number, then recruiting other Beavers is free.
"""
return self._free_beavers_count
@property
def game_objects(self) -> Dict[str, 'games.stumped.game_object.GameObject']:
"""dict[str, games.stumped.game_object.GameObject]: A mapping of every game object's ID to the actual game object. Primarily used by the server and client to easily refer to the game objects via ID.
"""
return self._game_objects
@property
def jobs(self) -> List['games.stumped.job.Job']:
"""list[games.stumped.job.Job]: All the Jobs that Beavers can have in the game.
"""
return self._jobs
@property
def lodge_cost_constant(self) -> float:
"""float: Constant number used to calculate what it costs to spawn a new lodge.
"""
return self._lodge_cost_constant
@property
def lodges_to_win(self) -> int:
"""int: How many lodges must be owned by a Player at once to win the game.
"""
return self._lodges_to_win
@property
def map_height(self) -> int:
"""int: The number of Tiles in the map along the y (vertical) axis.
"""
return self._map_height
@property
def map_width(self) -> int:
"""int: The number of Tiles in the map along the x (horizontal) axis.
"""
return self._map_width
@property
def max_turns(self) -> int:
"""int: The maximum number of turns before the game will automatically end.
"""
return self._max_turns
@property
def players(self) -> List['games.stumped.player.Player']:
"""list[games.stumped.player.Player]: List of all the players in the game.
"""
return self._players
@property
def session(self) -> str:
"""str: A unique identifier for the game instance that is being played.
"""
return self._session
@property
def spawner(self) -> List['games.stumped.spawner.Spawner']:
"""list[games.stumped.spawner.Spawner]: Every Spawner in the game.
"""
return self._spawner
@property
def spawner_harvest_constant(self) -> float:
"""float: Constant number used to calculate how many branches/food Beavers harvest from Spawners.
"""
return self._spawner_harvest_constant
@property
def spawner_types(self) -> List[str]:
"""list[str]: All the types of Spawners in the game.
"""
return self._spawner_types
@property
def tiles(self) -> List['games.stumped.tile.Tile']:
"""list[games.stumped.tile.Tile]: All the tiles in the map, stored in Row-major order. Use `x + y * mapWidth` to access the correct index.
"""
return self._tiles
@property
def time_added_per_turn(self) -> int:
"""int: The amount of time (in nano-seconds) added after each player performs a turn.
"""
return self._time_added_per_turn
def get_tile_at(self, x: int, y: int) -> Optional['games.stumped.tile.Tile']:
"""Gets the Tile at a specified (x, y) position.
Args:
x (int): An integer between 0 and the map_width.
y (int): An integer between 0 and the map_height.
Returns:
games.stumped.tile.Tile or None: The Tile at (x, y) or None if out of bounds.
"""
if x < 0 or y < 0 or x >= self.map_width or y >= self.map_height:
# out of bounds
return None
return self.tiles[x + y * self.map_width]
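    # Worked example of the row-major formula used above (illustrative): on a
    # map with map_width == 32, the Tile at (x=5, y=2) is self.tiles[5 + 2 * 32],
    # i.e. index 69.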
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
# <<-- /Creer-Merge: functions -->>
| mit |
shootstar/novatest | nova/api/openstack/compute/contrib/scheduler_hints.py | 3 | 2087 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
class SchedulerHintsController(wsgi.Controller):
@staticmethod
def _extract_scheduler_hints(body):
hints = {}
attr = '%s:scheduler_hints' % Scheduler_hints.alias
try:
if 'os:scheduler_hints' in body:
# NOTE(vish): This is for legacy support
hints.update(body['os:scheduler_hints'])
elif attr in body:
hints.update(body[attr])
# Fail if non-dict provided
except ValueError:
msg = _("Malformed scheduler_hints attribute")
raise webob.exc.HTTPBadRequest(reason=msg)
return hints
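    # Example request body handled above (illustrative; the hint name and UUID
    # are made up):
    #
    #     {"server": {"name": "vm-1", ...},
    #      "OS-SCH-HNT:scheduler_hints": {"same_host": "a0cf03a5-d921-..."}}
    #
    # The legacy "os:scheduler_hints" key is accepted as well; either way the
    # extracted hints end up attached to body['server'] by create() below.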
@wsgi.extends
def create(self, req, body):
hints = self._extract_scheduler_hints(body)
if 'server' in body:
body['server']['scheduler_hints'] = hints
yield
class Scheduler_hints(extensions.ExtensionDescriptor):
"""Pass arbitrary key/value pairs to the scheduler."""
name = "SchedulerHints"
alias = "OS-SCH-HNT"
namespace = ("http://docs.openstack.org/compute/ext/"
"scheduler-hints/api/v2")
updated = "2011-07-19T00:00:00+00:00"
def get_controller_extensions(self):
controller = SchedulerHintsController()
ext = extensions.ControllerExtension(self, 'servers', controller)
return [ext]
| apache-2.0 |
sarantapichos/faircoop-market | openerp/conf/deprecation.py | 380 | 2602 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Regroup variables for deprecated features.
To keep the OpenERP server backward compatible with older modules, some
additional code is needed throughout the core library. This module keeps
track of those specific measures by providing variables that can be unset
by the user to check if her code is future proof.
In a perfect world, all these variables are set to False, the corresponding
code removed, and thus these variables made unnecessary.
"""
# If True, the Python modules inside the openerp namespace are made available
# without the 'openerp.' prefix. E.g. openerp.osv.osv and osv.osv refer to the
# same module.
# Introduced around 2011.02.
# Change to False around 2013.02.
open_openerp_namespace = False
# If True, openerp.netsvc.LocalService() can be used to lookup reports or to
# access openerp.workflow.
# Introduced around 2013.03.
# Among the related code:
# - The openerp.netsvc.LocalService() function.
# - The openerp.report.interface.report_int._reports dictionary.
# - The register attribute in openerp.report.interface.report_int (and in its
#   inheriting classes).
# - The auto column in ir.actions.report.xml.
allow_local_service = True
# Applies for the register attribute in openerp.report.interface.report_int.
# See comments for allow_local_service above.
# Introduced around 2013.03.
allow_report_int_registration = True
# If True, the functions in openerp.pooler can be used.
# Introduced around 2013.03 (actually they are deprecated since much longer
# but no warning was dispayed in the logs).
openerp_pooler = True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mvidalgarcia/indico | indico/modules/events/cloning.py | 2 | 8710 | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from collections import OrderedDict
from operator import attrgetter
from indico.core import signals
from indico.util.caching import memoize_request
from indico.util.decorators import cached_classproperty
from indico.util.signals import named_objects_from_signal
class EventCloner(object):
"""
Base class to define cloning operations to be executed when an
event is cloned.
:param old_event: The event that's being cloned
"""
#: unique name of the clone action
name = None
#: the displayed name of the cloner
friendly_name = None
#: cloners that must be selected for this one to be available.
#: they are also guaranteed to run before this one.
requires = frozenset()
#: cloners that must run before this one (if enabled), but this
#: one runs even if they are not enabled
uses = frozenset()
#: Whether the clone operation is selected by default.
#: Use this to deselect options which are less common and thus
#: should not be enabled by default when cloning an event.
is_default = False
#: Whether this cloner is internal and never shown in the list.
#: An internal cloner is executed when `is_default` is set to
#: ``True`` or another cloner depends on it (always use `requires`
#: for this; `uses` will not enable an internal cloner). If
#: you override `is_visible` for an internal cloner (which only
#: makes sense when turning `is_internal` into a property), make
#: sure to check the super return value of `is_visible` to prevent
#: an internal cloner from showing up in the cloner selection.
is_internal = False
#: Whether this cloner is always available when pulled in as a
#: 'requires' dependency. This allows requiring a cloner without
#: having to keep it available even if there are no clonable
#: objects. For example, you may have something that uses the
#: 'tracks' cloner since it can reference tracks (and thus needs
#: them cloned) but also contains various other things that may
    #: be clone-worthy even without tracks being set up. While one
    #: might think of using 'uses' instead of 'requires' here, that
    #: would force people to explicitly enable the other cloner even
    #: when it makes no sense not to run it.
always_available_dep = False
@classmethod
def get_cloners(cls, old_event):
"""Return the list of cloners (sorted for display)"""
return sorted((cloner_cls(old_event) for cloner_cls in get_event_cloners().itervalues()),
key=attrgetter('friendly_name'))
@classmethod
def run_cloners(cls, old_event, new_event, cloners):
all_cloners = OrderedDict((name, cloner_cls(old_event))
for name, cloner_cls in get_event_cloners().iteritems())
if any(cloner.is_internal for name, cloner in all_cloners.iteritems() if name in cloners):
raise Exception('An internal cloner was selected')
# enable internal cloners that are enabled by default or required by another cloner
cloners |= {c.name
for c in all_cloners.itervalues()
if c.is_internal and (c.is_default or c.required_by_deep & cloners)}
# enable unavailable cloners that may be pulled in as a dependency nonetheless
extra = {c.name
for c in all_cloners.itervalues()
if not c.is_available and c.always_available_dep and c.required_by_deep & cloners}
cloners |= extra
active_cloners = OrderedDict((name, cloner) for name, cloner in all_cloners.iteritems() if name in cloners)
if not all((c.is_internal or c.is_visible) and c.is_available
for c in active_cloners.itervalues()
if c.name not in extra):
raise Exception('An invisible/unavailable cloner was selected')
for name, cloner in active_cloners.iteritems():
if not (cloners >= cloner.requires_deep):
raise Exception('Cloner {} requires {}'.format(name, ', '.join(cloner.requires_deep - cloners)))
shared_data = {}
cloner_names = set(active_cloners)
for name, cloner in active_cloners.iteritems():
shared_data[name] = cloner.run(new_event, cloner_names, cloner._prepare_shared_data(shared_data))
@cached_classproperty
@classmethod
def requires_deep(cls):
"""All cloner names required by this cloner.
This includes cloners required by a requirement.
"""
cloners = get_event_cloners()
todo = set(cls.requires)
required = set()
while todo:
cloner = todo.pop()
required.add(cloner)
todo |= cloners[cloner].requires
return required
@cached_classproperty
@classmethod
def required_by_deep(cls):
"""All cloner names depending on this cloner.
This includes cloners which depend on a cloner depending on
this cloner.
"""
# This is not very efficient, but it runs exactly once on a not-very-large set
return {cloner.name for cloner in get_event_cloners().itervalues() if cls.name in cloner.requires_deep}
def __init__(self, old_event):
self.old_event = old_event
def run(self, new_event, cloners, shared_data):
"""Performs the cloning operation.
:param new_event: The `Event` that's created by the cloning
operation.
:param cloners: A set containing the names of all enabled
cloners.
:param shared_data: A dict containing the data returned by
other cloners. Only data from cloners
specified in `requires` or `uses` will
be available in the dict. If a *used*
cloner was not selected, its name will
not be present in the data dict. The
value may be ``None`` depending on the
cloner. This would indicate that the
cloner was executed but did not return
any data.
:return: data that may be used by other cloners depending on
or using this cloner
"""
raise NotImplementedError
@property
def is_visible(self):
"""Whether the clone operation should be shown at all.
Use this to hide an option because of a feature not being
enabled or because of the event type not supporting it.
"""
return not self.is_internal
@property
def is_available(self):
"""Whether the clone operation can be selected.
Use this to disable options if selecting them wouldn't make
sense, e.g. because there is nothing to clone.
"""
return True
def _prepare_shared_data(self, shared_data):
linked = self.uses | self.requires
return {k: v for k, v in shared_data.iteritems() if k in linked}
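# --- Illustrative sketch (not part of the original module) ------------------
# A minimal concrete cloner showing how `requires`, `run()` and `shared_data`
# fit together. The cloner name 'example_notes', the required cloner
# 'contributions' and the `old_event.note_list` attribute are assumptions made
# purely for illustration; real cloners are registered through the
# `get_cloners` signal.
class _ExampleNotesCloner(EventCloner):
    name = 'example_notes'
    friendly_name = 'Example notes'
    requires = frozenset({'contributions'})
    @property
    def is_available(self):
        # Hide the option when there is nothing to clone.
        return bool(getattr(self.old_event, 'note_list', None))
    def run(self, new_event, cloners, shared_data):
        # Data returned by the 'contributions' cloner; it is guaranteed to
        # have run before us because it is listed in `requires`.
        contrib_map = shared_data['contributions']
        note_map = {}
        for note in getattr(self.old_event, 'note_list', []):
            note_map[note] = (new_event, contrib_map)
        # Whatever we return becomes `shared_data['example_notes']` for
        # cloners that require or use us.
        return note_map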
def _resolve_dependencies(cloners):
cloner_deps = {name: (cls.requires, cls.uses) for name, cls in cloners.iteritems()}
resolved_deps = set()
while cloner_deps:
# Get cloners with both hard and soft dependencies being met
ready = {cls for cls, deps in cloner_deps.iteritems() if all(d <= resolved_deps for d in deps)}
if not ready:
# Otherwise check for cloners with all hard dependencies being met
ready = {cls for cls, deps in cloner_deps.iteritems() if deps[0] <= resolved_deps}
if not ready:
# Either a circular dependency or a dependency that's not loaded
raise Exception('Could not resolve dependencies between cloners (remaining: {})'
.format(', '.join(cloner_deps)))
resolved_deps |= ready
for name in ready:
yield name, cloners[name]
del cloner_deps[name]
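# Worked example for the resolver above (illustrative; the cloner names are
# made up): with three cloners where 'b' requires 'a' and 'c' merely uses 'b',
# the first pass yields 'a' (no dependencies), the second yields 'b' (its hard
# dependency 'a' is now resolved) while 'c' still waits for its soft
# dependency, and a third pass finally yields 'c' -- giving the order a, b, c.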
@memoize_request
def get_event_cloners():
"""Get the dict containing all available event cloners.
The returned dict is ordered based on the dependencies of each
cloner and when executing the cloners MUST be executed in that
order.
"""
cloners = named_objects_from_signal(signals.event_management.get_cloners.send(), plugin_attr='plugin')
return OrderedDict(_resolve_dependencies(cloners))
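# Illustrative usage (the selected cloner names are assumptions; the real set
# depends on which cloners are registered by plugins):
#
#     EventCloner.run_cloners(old_event, new_event, {'acl', 'contributions'})
#
# This instantiates every registered cloner, pulls in internal/required ones,
# validates the selection and runs the cloners in dependency order.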
| mit |
briancoutinho0905/2dsampling | src/mem/ruby/structures/WireBuffer.py | 32 | 1833 | # Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Lisa Hsu
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
class RubyWireBuffer(SimObject):
type = 'RubyWireBuffer'
cxx_class = 'WireBuffer'
cxx_header = "mem/ruby/structures/WireBuffer.hh"
ruby_system = Param.RubySystem(Parent.any, "")
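# Illustrative sketch (assumption, not part of the original file): like any
# other SimObject this is instantiated from a gem5 configuration script, and
# its ruby_system parameter normally resolves through Parent.any:
#
#     from m5.objects import RubyWireBuffer
#     wire_buffer = RubyWireBuffer()   # or RubyWireBuffer(ruby_system=rs)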
| bsd-3-clause |
mlhenderson/narrative | src/biokbase/narrative/common/tests/util.py | 6 | 3349 | """
Test utility functions
"""
__author__ = 'Dan Gunter <[email protected]>'
import logging
import pickle
import struct
import threading
import time
import unittest
import SocketServer
from biokbase.narrative.common import util
_log = logging.getLogger('kbtest')
_hnd = logging.StreamHandler()
_hnd.setFormatter(logging.Formatter("[%(levelname)s] %(asctime)s %(name)s: %(message)s"))
_log.addHandler(_hnd)
_log.setLevel(logging.DEBUG)
def test_logger(name):
return logging.getLogger('kbtest.' + name)
class MyTestCase(unittest.TestCase):
def test_kvparse(self):
for input, text, kvp in (
("foo", "foo", {}),
("name=val", "", {"name": "val"}),
("a name=val boy", "a boy", {"name": "val"})
):
rkvp = {}
rtext = util.parse_kvp(input, rkvp)
self.assertEqual(text, rtext, "Text '{}' does not match "
"result '{}' "
"from input '{}'".format(
text, rtext, input))
self.assertEqual(text, rtext, "Dict '{}' does not match "
"result '{}' "
"from input '{}'".format(
kvp, rkvp, input))
class SocketServerBuf(SocketServer.TCPServer):
allow_reuse_address = True
def __init__(self, addr, handler):
SocketServer.TCPServer.__init__(self, addr, handler)
self.buf = ""
def get_data(self):
"""Get current buffer and clear it."""
data, self.buf = self.buf, ""
return data
def server_close(self):
self.socket.close()
def recvall(socket, n, timeout=0):
buf, m, t = '', 0, time.time()
while m < n:
if timeout > 0 and (time.time() - t > timeout):
raise RuntimeError("Timeout")
b = socket.recv(n - m)
if b:
buf += b
m += len(b)
#print("@@ recv {}".format(len(b)))
else:
time.sleep(0.1)
#print("@@ recv 0/{}".format(n - m))
return buf
class MessageBufferer(SocketServer.BaseRequestHandler):
def handle(self):
self.request.settimeout(1)
while 1:
try:
hdr = self.request.recv(4)
except Exception as err:
return
if not hdr:
return
size = struct.unpack('>L', hdr)[0]
#print("@@ body {}".format(size))
if size < 65536:
chunk = recvall(self.request, size, timeout=1)
record = pickle.loads(chunk)
#print("@@ message <{}>".format(record['message']))
self.server.buf += record['message']
def start_tcp_server(host, port, poll_interval):
_log.info("Starting server on {}:{}".format(host, port))
server = SocketServerBuf((host, port), MessageBufferer)
thr = threading.Thread(target=server.serve_forever,
args=[poll_interval])
thr.daemon = True
thr.start()
return server, thr
def stop_tcp_server(server, thr):
_log.info("Stopping server")
server.shutdown()
thr.join()
_log.info("Stopped server")
server.server_close()
_log.info("Closed server")
if __name__ == '__main__':
unittest.main()
| mit |
hasadna/knesset-data-pipelines | datapackage_pipelines_knesset/retry_get_response_content.py | 1 | 3427 | import logging
import os
import time
import requests
from datapackage_pipelines_knesset.dataservice.exceptions import ReachedMaxRetries, InvalidStatusCodeException
def is_blocked(content):
    for marker in ['if(u82222.w(u82222.O', 'window.rbzid=', '<html><head><meta charset="utf-8"><script>']:
        if marker in content:
            return True
    return False
def get_retry_response_content(url, params, timeout, proxies, retry_num, num_retries, seconds_between_retries,
skip_not_found_errors=False, headers=None):
if not timeout or float(timeout) < 60:
timeout = 60
proxies = proxies if proxies else {}
headers = headers if headers else {}
if os.environ.get("DATASERVICE_HTTP_PROXY"):
proxies["http"] = os.environ["DATASERVICE_HTTP_PROXY"]
try:
response = requests.get(url, params=params, timeout=timeout, proxies=proxies, headers=headers)
except requests.exceptions.InvalidSchema:
# missing dependencies for SOCKS support
raise
except requests.RequestException as e:
# network / http problem - start the retry mechanism
if (retry_num < num_retries):
logging.exception(e)
logging.info("url: {} params: {}".format(url, params))
logging.info("retry {} / {}, waiting {} seconds before retrying...".format(retry_num,
num_retries,
seconds_between_retries))
time.sleep(seconds_between_retries)
return get_retry_response_content(url, params, timeout, proxies, retry_num + 1, num_retries,
seconds_between_retries, headers=headers)
else:
logging.info("url: {} params: {}".format(url, params))
raise ReachedMaxRetries(e)
if response.status_code != 200:
# http status_code is not 200 - retry won't help here
if response.status_code == 404 and skip_not_found_errors:
return bytes("", "utf-8")
else:
logging.info("url: {} params: {}".format(url, params))
raise InvalidStatusCodeException(response.status_code, response.content)
else:
try:
response_text = response.content.decode('utf-8')
except Exception:
response_text = None
if response_text and is_blocked(response_text) and 'Set-Cookie' in response.headers and 'Cookie' not in headers:
# try again, but this time with the cookie we were given
first_cookie = response.headers['Set-Cookie'].split(";")[0]
headers['Cookie'] = first_cookie
return get_retry_response_content(url, params, timeout, proxies, retry_num, num_retries,
seconds_between_retries, headers=headers)
if response_text and is_blocked(response_text):
# logging.info(response.content.decode('utf-8'))
raise Exception("seems your request is blocked, you should use the app ssh socks proxy\n"
"url={}\n"
"params={}\n"
"proxies={}\n"
"headers={}".format(url, params, proxies, headers))
else:
return response.content
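def _example_fetch(url, params=None):
    """Illustrative wrapper (not part of the original module; the retry
    parameters below are arbitrary assumptions): fetch a dataservice URL,
    retrying up to 5 times with 10 seconds between attempts.
    """
    return get_retry_response_content(url, params=params, timeout=120,
                                      proxies=None, retry_num=1, num_retries=5,
                                      seconds_between_retries=10)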
| mit |
offbyone/tweetstream | stream/requests_oauthlib/oauth2_session.py | 1 | 15013 | from __future__ import unicode_literals
import logging
from oauthlib.common import generate_token, urldecode
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import TokenExpiredError, is_secure_transport
import requests
log = logging.getLogger(__name__)
class TokenUpdated(Warning):
def __init__(self, token):
super(TokenUpdated, self).__init__()
self.token = token
class OAuth2Session(requests.Session):
"""Versatile OAuth 2 extension to :class:`requests.Session`.
Supports any grant type adhering to :class:`oauthlib.oauth2.Client` spec
including the four core OAuth 2 grants.
Can be used to create authorization urls, fetch tokens and access protected
resources using the :class:`requests.Session` interface you are used to.
- :class:`oauthlib.oauth2.WebApplicationClient` (default): Authorization Code Grant
- :class:`oauthlib.oauth2.MobileApplicationClient`: Implicit Grant
- :class:`oauthlib.oauth2.LegacyApplicationClient`: Password Credentials Grant
- :class:`oauthlib.oauth2.BackendApplicationClient`: Client Credentials Grant
Note that the only time you will be using Implicit Grant from python is if
you are driving a user agent able to obtain URL fragments.
"""
def __init__(self, client_id=None, client=None, auto_refresh_url=None,
auto_refresh_kwargs=None, scope=None, redirect_uri=None, token=None,
state=None, token_updater=None, **kwargs):
"""Construct a new OAuth 2 client session.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param scope: List of scopes you wish to request access to
:param redirect_uri: Redirect URI you registered as callback
:param token: Token dictionary, must include access_token
and token_type.
:param state: State string used to prevent CSRF. This will be given
when creating the authorization url and must be supplied
when parsing the authorization response.
Can be either a string or a no argument callable.
:auto_refresh_url: Refresh token endpoint URL, must be HTTPS. Supply
this if you wish the client to automatically refresh
your access tokens.
:auto_refresh_kwargs: Extra arguments to pass to the refresh token
endpoint.
:token_updater: Method with one argument, token, to be used to update
                        your token database on automatic token refresh. If not
set a TokenUpdated warning will be raised when a token
has been refreshed. This warning will carry the token
in its token argument.
:param kwargs: Arguments to pass to the Session constructor.
"""
super(OAuth2Session, self).__init__(**kwargs)
self.client_id = client_id or client.client_id
self.scope = scope
self.redirect_uri = redirect_uri
self.token = token or {}
self.state = state or generate_token
self._state = state
self.auto_refresh_url = auto_refresh_url
self.auto_refresh_kwargs = auto_refresh_kwargs or {}
self.token_updater = token_updater
self._client = client or WebApplicationClient(client_id, token=token)
self._client._populate_attributes(token or {})
# Allow customizations for non compliant providers through various
# hooks to adjust requests and responses.
self.compliance_hook = {
'access_token_response': set([]),
'refresh_token_response': set([]),
'protected_request': set([]),
}
def new_state(self):
"""Generates a state string to be used in authorizations."""
try:
self._state = self.state()
log.debug('Generated new state %s.', self._state)
except TypeError:
self._state = self.state
log.debug('Re-using previously supplied state %s.', self._state)
return self._state
@property
def authorized(self):
"""Boolean that indicates whether this session has an OAuth token
or not. If `self.authorized` is True, you can reasonably expect
OAuth-protected requests to the resource to succeed. If
`self.authorized` is False, you need the user to go through the OAuth
authentication dance before OAuth-protected requests to the resource
will succeed.
"""
return bool(self._client.access_token)
def authorization_url(self, url, state=None, **kwargs):
"""Form an authorization URL.
:param url: Authorization endpoint url, must be HTTPS.
:param state: An optional state string for CSRF protection. If not
given it will be generated for you.
:param kwargs: Extra parameters to include.
:return: authorization_url, state
"""
state = state or self.new_state()
return self._client.prepare_request_uri(url,
redirect_uri=self.redirect_uri,
scope=self.scope,
state=state,
**kwargs), state
def fetch_token(self, token_url, code=None, authorization_response=None,
body='', auth=None, username=None, password=None, method='POST',
timeout=None, headers=None, verify=True, **kwargs):
"""Generic method for fetching an access token from the token endpoint.
If you are using the MobileApplicationClient you will want to use
token_from_fragment instead of fetch_token.
:param token_url: Token endpoint URL, must use HTTPS.
:param code: Authorization code (used by WebApplicationClients).
:param authorization_response: Authorization response URL, the callback
URL of the request back to you. Used by
WebApplicationClients instead of code.
        :param body: Optional application/x-www-form-urlencoded body to
                     include in the token request. Prefer kwargs over body.
:param auth: An auth tuple or method as accepted by requests.
:param username: Username used by LegacyApplicationClients.
:param password: Password used by LegacyApplicationClients.
:param method: The HTTP method used to make the request. Defaults
to POST, but may also be GET. Other methods should
be added as needed.
:param headers: Dict to default request headers with.
:param timeout: Timeout of the request in seconds.
:param verify: Verify SSL certificate.
:param kwargs: Extra parameters to include in the token request.
:return: A token dict
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
if not code and authorization_response:
self._client.parse_request_uri_response(authorization_response,
state=self._state)
code = self._client.code
elif not code and isinstance(self._client, WebApplicationClient):
code = self._client.code
if not code:
raise ValueError('Please supply either code or '
                             'authorization_response parameters.')
body = self._client.prepare_request_body(code=code, body=body,
redirect_uri=self.redirect_uri, username=username,
password=password, **kwargs)
headers = headers or {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
}
if method.upper() == 'POST':
r = self.post(token_url, data=dict(urldecode(body)),
timeout=timeout, headers=headers, auth=auth,
verify=verify)
log.debug('Prepared fetch token request body %s', body)
elif method.upper() == 'GET':
# if method is not 'POST', switch body to querystring and GET
r = self.get(token_url, params=dict(urldecode(body)),
timeout=timeout, headers=headers, auth=auth,
verify=verify)
log.debug('Prepared fetch token request querystring %s', body)
else:
raise ValueError('The method kwarg must be POST or GET.')
log.debug('Request to fetch token completed with status %s.',
r.status_code)
log.debug('Request headers were %s', r.request.headers)
log.debug('Request body was %s', r.request.body)
log.debug('Response headers were %s and content %s.',
r.headers, r.text)
log.debug('Invoking %d token response hooks.',
len(self.compliance_hook['access_token_response']))
for hook in self.compliance_hook['access_token_response']:
log.debug('Invoking hook %s.', hook)
r = hook(r)
r.raise_for_status()
self._client.parse_request_body_response(r.text, scope=self.scope)
self.token = self._client.token
log.debug('Obtained token %s.', self.token)
return self.token
def token_from_fragment(self, authorization_response):
"""Parse token from the URI fragment, used by MobileApplicationClients.
:param authorization_response: The full URL of the redirect back to you
:return: A token dict
"""
self._client.parse_request_uri_response(authorization_response,
state=self._state)
self.token = self._client.token
return self.token
def refresh_token(self, token_url, refresh_token=None, body='', auth=None,
timeout=None, verify=True, **kwargs):
"""Fetch a new access token using a refresh token.
:param token_url: The token endpoint, must be HTTPS.
:param refresh_token: The refresh_token to use.
        :param body: Optional application/x-www-form-urlencoded body to
                     include in the token request. Prefer kwargs over body.
:param auth: An auth tuple or method as accepted by requests.
:param timeout: Timeout of the request in seconds.
:param verify: Verify SSL certificate.
:param kwargs: Extra parameters to include in the token request.
:return: A token dict
"""
if not token_url:
raise ValueError('No token endpoint set for auto_refresh.')
if not is_secure_transport(token_url):
raise InsecureTransportError()
# Need to nullify token to prevent it from being added to the request
refresh_token = refresh_token or self.token.get('refresh_token')
self.token = {}
log.debug('Adding auto refresh key word arguments %s.',
self.auto_refresh_kwargs)
kwargs.update(self.auto_refresh_kwargs)
body = self._client.prepare_refresh_body(body=body,
refresh_token=refresh_token, scope=self.scope, **kwargs)
log.debug('Prepared refresh token request body %s', body)
r = self.post(token_url, data=dict(urldecode(body)), auth=auth,
timeout=timeout, verify=verify)
log.debug('Request to refresh token completed with status %s.',
r.status_code)
log.debug('Response headers were %s and content %s.',
r.headers, r.text)
log.debug('Invoking %d token response hooks.',
len(self.compliance_hook['refresh_token_response']))
for hook in self.compliance_hook['refresh_token_response']:
log.debug('Invoking hook %s.', hook)
r = hook(r)
self.token = self._client.parse_request_body_response(r.text, scope=self.scope)
        if 'refresh_token' not in self.token:
log.debug('No new refresh token given. Re-using old.')
self.token['refresh_token'] = refresh_token
return self.token
def request(self, method, url, data=None, headers=None, **kwargs):
"""Intercept all requests and add the OAuth 2 token if present."""
if not is_secure_transport(url):
raise InsecureTransportError()
if self.token:
log.debug('Invoking %d protected resource request hooks.',
len(self.compliance_hook['protected_request']))
for hook in self.compliance_hook['protected_request']:
log.debug('Invoking hook %s.', hook)
url, headers, data = hook(url, headers, data)
log.debug('Adding token %s to request.', self.token)
try:
url, headers, data = self._client.add_token(url,
http_method=method, body=data, headers=headers)
# Attempt to retrieve and save new access token if expired
except TokenExpiredError:
if self.auto_refresh_url:
log.debug('Auto refresh is set, attempting to refresh at %s.',
self.auto_refresh_url)
token = self.refresh_token(self.auto_refresh_url)
if self.token_updater:
log.debug('Updating token to %s using %s.',
token, self.token_updater)
self.token_updater(token)
url, headers, data = self._client.add_token(url,
http_method=method, body=data, headers=headers)
else:
raise TokenUpdated(token)
else:
raise
log.debug('Requesting url %s using method %s.', url, method)
log.debug('Supplying headers %s and data %s', headers, data)
log.debug('Passing through key word arguments %s.', kwargs)
return super(OAuth2Session, self).request(method, url,
headers=headers, data=data, **kwargs)
def register_compliance_hook(self, hook_type, hook):
"""Register a hook for request/response tweaking.
Available hooks are:
access_token_response invoked before token parsing.
refresh_token_response invoked before refresh token parsing.
protected_request invoked before making a request.
If you find a new hook is needed please send a GitHub PR request
or open an issue.
"""
if hook_type not in self.compliance_hook:
            raise ValueError('Hook type %s is not in %s.' %
                             (hook_type, self.compliance_hook))
self.compliance_hook[hook_type].add(hook)
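# --- Illustrative sketch (not part of the original module) ------------------
# Typical Authorization Code Grant usage of OAuth2Session. Every URL, client
# id/secret and scope below is a placeholder assumption.
def _example_web_app_flow():
    client_id = 'example-client-id'
    client_secret = 'example-client-secret'
    oauth = OAuth2Session(client_id, scope=['profile'],
                          redirect_uri='https://example.com/callback')
    # Step 1: send the user to the provider's authorization endpoint.
    authorization_url, state = oauth.authorization_url(
        'https://provider.example.com/oauth/authorize')
    # Step 2: the provider redirects back to redirect_uri; hand the full
    # callback URL over so the authorization code (and state) can be parsed.
    oauth.fetch_token('https://provider.example.com/oauth/token',
                      authorization_response='https://example.com/callback'
                                             '?code=PLACEHOLDER&state=' + state,
                      client_secret=client_secret)
    # Step 3: the session now injects the token into every request it makes.
    return oauth.get('https://provider.example.com/api/resource')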
| bsd-3-clause |
dbertha/odoo | addons/sale_service/__openerp__.py | 260 | 2447 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Create Tasks on SO',
'version': '1.0',
'category': 'Project Management',
'description': """
Automatically creates project tasks from procurement lines.
===========================================================
This module will automatically create a new task for each procurement order line
(e.g. for sale order lines), if the corresponding product meets the following
characteristics:
* Product Type = Service
* Procurement Method (Order fulfillment) = MTO (Make to Order)
* Supply/Procurement Method = Manufacture
If on top of that a project is specified on the product form (in the Procurement
tab), then the new task will be created in that specific project. Otherwise, the
new task will not belong to any project, and may be added to a project manually
later.
When the project task is completed or cancelled, the corresponding procurement
is updated accordingly. For example, if this procurement corresponds to a sale
order line, the sale order line will be considered delivered when the task is
completed.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'depends': ['project', 'procurement', 'sale', 'procurement_jit'],
'data': ['views/sale_service_view.xml'],
'demo': ['demo/sale_service_demo.xml'],
'test': ['test/project_task_procurement.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
larsbutler/swift | test/unit/account/test_server.py | 2 | 99400 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import mock
import unittest
from tempfile import mkdtemp
from shutil import rmtree
from time import gmtime
from test.unit import FakeLogger
import itertools
import random
import json
from six import BytesIO
from six import StringIO
import xml.dom.minidom
from swift import __version__ as swift_version
from swift.common.swob import (Request, WsgiBytesIO, HTTPNoContent)
from swift.common import constraints
from swift.account.server import AccountController
from swift.common.utils import (normalize_timestamp, replication, public,
mkdirs, storage_directory)
from swift.common.request_helpers import get_sys_meta_prefix
from test.unit import patch_policies, debug_logger
from swift.common.storage_policy import StoragePolicy, POLICIES
@patch_policies
class TestAccountController(unittest.TestCase):
"""Test swift.account.server.AccountController"""
def setUp(self):
"""Set up for testing swift.account.server.AccountController"""
self.testdir_base = mkdtemp()
self.testdir = os.path.join(self.testdir_base, 'account_server')
self.controller = AccountController(
{'devices': self.testdir, 'mount_check': 'false'})
def tearDown(self):
"""Tear down for testing swift.account.server.AccountController"""
try:
rmtree(self.testdir_base)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def test_OPTIONS(self):
server_handler = AccountController(
{'devices': self.testdir, 'mount_check': 'false'})
req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = server_handler.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE'.split():
self.assertTrue(
verb in resp.headers['Allow'].split(', '))
self.assertEqual(len(resp.headers['Allow'].split(', ')), 7)
self.assertEqual(resp.headers['Server'],
(server_handler.server_type + '/' + swift_version))
def test_DELETE_not_found(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue('X-Account-Status' not in resp.headers)
def test_DELETE_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_not_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
# We now allow deleting non-empty accounts
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_now_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c1',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '2',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_timestamp_not_float(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_REPLICATE_insufficient_storage(self):
conf = {'devices': self.testdir, 'mount_check': 'true'}
self.account_controller = AccountController(conf)
def fake_check_mount(*args, **kwargs):
return False
with mock.patch("swift.common.constraints.check_mount",
fake_check_mount):
req = Request.blank('/sda1/p/suff',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
resp = req.get_response(self.account_controller)
self.assertEqual(resp.status_int, 507)
def test_REPLICATE_works(self):
mkdirs(os.path.join(self.testdir, 'sda1', 'account', 'p', 'a', 'a'))
db_file = os.path.join(self.testdir, 'sda1',
storage_directory('account', 'p', 'a'),
'a' + '.db')
open(db_file, 'w')
def fake_rsync_then_merge(self, drive, db_file, args):
return HTTPNoContent()
with mock.patch("swift.common.db_replicator.ReplicatorRpc."
"rsync_then_merge", fake_rsync_then_merge):
req = Request.blank('/sda1/p/a/',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
json_string = '["rsync_then_merge", "a.db"]'
inbuf = WsgiBytesIO(json_string)
req.environ['wsgi.input'] = inbuf
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
        # check that a malformed request body (ValueError) returns 400
wsgi_input_valuerror = '["sync" : sync, "-1"]'
inbuf1 = WsgiBytesIO(wsgi_input_valuerror)
req.environ['wsgi.input'] = inbuf1
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_HEAD_not_found(self):
# Test the case in which account does not exist (can be recreated)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue('X-Account-Status' not in resp.headers)
# Test the case in which account was deleted but not yet reaped
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_HEAD_empty_account(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['x-account-container-count'], '0')
self.assertEqual(resp.headers['x-account-object-count'], '0')
self.assertEqual(resp.headers['x-account-bytes-used'], '0')
def test_HEAD_with_containers(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['x-account-container-count'], '2')
self.assertEqual(resp.headers['x-account-object-count'], '0')
self.assertEqual(resp.headers['x-account-bytes-used'], '0')
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '1',
'X-Bytes-Used': '2',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '3',
'X-Bytes-Used': '4',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '5'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['x-account-container-count'], '2')
self.assertEqual(resp.headers['x-account-object-count'], '4')
self.assertEqual(resp.headers['x-account-bytes-used'], '6')
def test_HEAD_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_HEAD_invalid_content_type(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'},
headers={'Accept': 'application/plain'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 406)
def test_HEAD_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_HEAD_invalid_format(self):
format = '%D1%BD%8A9' # invalid UTF-8; should be %E1%BD%8A9 (E -> D)
req = Request.blank('/sda1/p/a?format=' + format,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_not_found(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-PUT-Timestamp': normalize_timestamp(1),
'X-DELETE-Timestamp': normalize_timestamp(0),
'X-Object-Count': '1',
'X-Bytes-Used': '1',
'X-Timestamp': normalize_timestamp(0)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue('X-Account-Status' not in resp.headers)
def test_PUT(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
def test_PUT_simulated_create_race(self):
state = ['initial']
from swift.account.backend import AccountBroker as OrigAcBr
class InterceptedAcBr(OrigAcBr):
def __init__(self, *args, **kwargs):
super(InterceptedAcBr, self).__init__(*args, **kwargs)
if state[0] == 'initial':
# Do nothing initially
pass
elif state[0] == 'race':
# Save the original db_file attribute value
self._saved_db_file = self.db_file
self.db_file += '.doesnotexist'
def initialize(self, *args, **kwargs):
if state[0] == 'initial':
# Do nothing initially
pass
elif state[0] == 'race':
# Restore the original db_file attribute to get the race
# behavior
self.db_file = self._saved_db_file
return super(InterceptedAcBr, self).initialize(*args, **kwargs)
with mock.patch("swift.account.server.AccountBroker", InterceptedAcBr):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
state[0] = "race"
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
def test_PUT_after_DELETE(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(2)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 403)
self.assertEqual(resp.body, 'Recently deleted')
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_PUT_non_utf8_metadata(self):
# Set metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Account-Meta-Test': b'\xff'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
# Set sysmeta header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Account-Sysmeta-Access-Control': b'\xff'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
# Send other
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Will-Not-Be-Saved': b'\xff'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
def test_PUT_GET_metadata(self):
# Set metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Account-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Account-Meta-Test2': 'Value2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'Value')
self.assertEqual(resp.headers.get('x-account-meta-test2'), 'Value2')
# Update metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(3),
'X-Account-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(2),
'X-Account-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(4),
'X-Account-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertTrue('x-account-meta-test' not in resp.headers)
def test_PUT_GET_sys_metadata(self):
prefix = get_sys_meta_prefix('account')
hdr = '%stest' % prefix
hdr2 = '%stest2' % prefix
# Set metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1),
hdr.title(): 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
hdr2.title(): 'Value2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'Value')
self.assertEqual(resp.headers.get(hdr2), 'Value2')
# Update metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(3),
hdr.title(): 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(2),
hdr.title(): 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(4),
hdr.title(): ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertTrue(hdr not in resp.headers)
def test_PUT_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_POST_HEAD_metadata(self):
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# Set metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Account-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'Value')
# Update metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(3),
'X-Account-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(2),
'X-Account-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(4),
'X-Account-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertTrue('x-account-meta-test' not in resp.headers)
def test_POST_HEAD_sys_metadata(self):
prefix = get_sys_meta_prefix('account')
hdr = '%stest' % prefix
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# Set metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
hdr.title(): 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'Value')
# Update metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(3),
hdr.title(): 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(2),
hdr.title(): 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(4),
hdr.title(): ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertTrue(hdr not in resp.headers)
def test_POST_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_POST_timestamp_not_float(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '0'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_POST_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_POST_after_DELETE_not_found(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_GET_not_found_plain(self):
# Test the case in which account does not exist (can be recreated)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue('X-Account-Status' not in resp.headers)
# Test the case in which account was deleted but not yet reaped
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_GET_not_found_json(self):
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_GET_not_found_xml(self):
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_GET_empty_account_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
def test_GET_empty_account_json(self):
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['Content-Type'],
'application/json; charset=utf-8')
def test_GET_empty_account_xml(self):
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['Content-Type'],
'application/xml; charset=utf-8')
def test_GET_over_limit(self):
req = Request.blank(
'/sda1/p/a?limit=%d' % (constraints.ACCOUNT_LISTING_LIMIT + 1),
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412)
def test_GET_with_containers_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body.strip().split('\n'), ['c1', 'c2'])
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '1',
'X-Bytes-Used': '2',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '3',
'X-Bytes-Used': '4',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body.strip().split('\n'), ['c1', 'c2'])
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(resp.charset, 'utf-8')
# test unknown format uses default plain
req = Request.blank('/sda1/p/a?format=somethinglese',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body.strip().split('\n'), ['c1', 'c2'])
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(resp.charset, 'utf-8')
def test_GET_with_containers_json(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body),
[{'count': 0, 'bytes': 0, 'name': 'c1'},
{'count': 0, 'bytes': 0, 'name': 'c2'}])
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '1',
'X-Bytes-Used': '2',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '3',
'X-Bytes-Used': '4',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body),
[{'count': 1, 'bytes': 2, 'name': 'c1'},
{'count': 3, 'bytes': 4, 'name': 'c2'}])
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(resp.charset, 'utf-8')
def test_GET_with_containers_xml(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/xml')
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEqual(len(listing), 2)
self.assertEqual(listing[0].nodeName, 'container')
container = [n for n in listing[0].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c1')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '0')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '0')
self.assertEqual(listing[-1].nodeName, 'container')
container = \
[n for n in listing[-1].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c2')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '0')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '0')
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '1',
'X-Bytes-Used': '2',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '3',
'X-Bytes-Used': '4',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEqual(len(listing), 2)
self.assertEqual(listing[0].nodeName, 'container')
container = [n for n in listing[0].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c1')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '1')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '2')
self.assertEqual(listing[-1].nodeName, 'container')
container = [
n for n in listing[-1].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c2')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '3')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '4')
self.assertEqual(resp.charset, 'utf-8')
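    # Illustrative sketch (not part of the original suite): the DOM-walking
    # assertions in these XML listing tests expect a response shaped roughly
    # like the following (values taken from the asserts above; attribute
    # details and whitespace of the real server output may differ):
    #
    #   <?xml version="1.0" encoding="UTF-8"?>
    #   <account name="a">
    #     <container><name>c1</name><count>1</count><bytes>2</bytes></container>
    #     <container><name>c2</name><count>3</count><bytes>4</bytes></container>
    #   </account>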
def test_GET_xml_escapes_account_name(self):
req = Request.blank(
'/sda1/p/%22%27', # "'
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/%22%27?format=xml',
environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(dom.firstChild.attributes['name'].value, '"\'')
def test_GET_xml_escapes_container_name(self):
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/%22%3Cword', # "<word
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_PUT_TIMESTAMP': '1', 'HTTP_X_OBJECT_COUNT': '0',
'HTTP_X_DELETE_TIMESTAMP': '0', 'HTTP_X_BYTES_USED': '1'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(
dom.firstChild.firstChild.nextSibling.firstChild.firstChild.data,
'"<word')
def test_GET_xml_escapes_container_name_as_subdir(self):
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/%22%3Cword-test', # "<word-test
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_PUT_TIMESTAMP': '1', 'HTTP_X_OBJECT_COUNT': '0',
'HTTP_X_DELETE_TIMESTAMP': '0', 'HTTP_X_BYTES_USED': '1'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a?format=xml&delimiter=-',
environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(
dom.firstChild.firstChild.nextSibling.attributes['name'].value,
'"<word-')
def test_GET_limit_marker_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
for c in range(5):
req = Request.blank(
'/sda1/p/a/c%d' % c,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': str(c + 1),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '3',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?limit=3',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body.strip().split('\n'), ['c0', 'c1', 'c2'])
req = Request.blank('/sda1/p/a?limit=3&marker=c2',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body.strip().split('\n'), ['c3', 'c4'])
def test_GET_limit_marker_json(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
for c in range(5):
req = Request.blank(
'/sda1/p/a/c%d' % c,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': str(c + 1),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '3',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?limit=3&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body),
[{'count': 2, 'bytes': 3, 'name': 'c0'},
{'count': 2, 'bytes': 3, 'name': 'c1'},
{'count': 2, 'bytes': 3, 'name': 'c2'}])
req = Request.blank('/sda1/p/a?limit=3&marker=c2&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body),
[{'count': 2, 'bytes': 3, 'name': 'c3'},
{'count': 2, 'bytes': 3, 'name': 'c4'}])
def test_GET_limit_marker_xml(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
for c in range(5):
req = Request.blank(
'/sda1/p/a/c%d' % c,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': str(c + 1),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '3',
'X-Timestamp': normalize_timestamp(c)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?limit=3&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEqual(len(listing), 3)
self.assertEqual(listing[0].nodeName, 'container')
container = [n for n in listing[0].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c0')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '3')
self.assertEqual(listing[-1].nodeName, 'container')
container = [
n for n in listing[-1].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c2')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '3')
req = Request.blank('/sda1/p/a?limit=3&marker=c2&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEqual(len(listing), 2)
self.assertEqual(listing[0].nodeName, 'container')
container = [n for n in listing[0].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c3')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '3')
self.assertEqual(listing[-1].nodeName, 'container')
container = [
n for n in listing[-1].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c4')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '3')
def test_GET_accept_wildcard(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = '*/*'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, 'c1\n')
def test_GET_accept_application_wildcard(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/*'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(json.loads(resp.body)), 1)
def test_GET_accept_json(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/json'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(json.loads(resp.body)), 1)
def test_GET_accept_xml(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/xml'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEqual(len(listing), 1)
def test_GET_accept_conflicting(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?format=plain',
environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/json'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, 'c1\n')
def test_GET_accept_not_valid(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/xml*'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 406)
def test_GET_delimiter_too_long(self):
req = Request.blank('/sda1/p/a?delimiter=xx',
environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412)
def test_GET_prefix_delimiter_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for first in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s' % first,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
for second in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s.%s' % (first, second),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?delimiter=.',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body.strip().split('\n'), ['sub.'])
req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(
resp.body.strip().split('\n'),
['sub.0', 'sub.0.', 'sub.1', 'sub.1.', 'sub.2', 'sub.2.'])
req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body.strip().split('\n'),
['sub.1.0', 'sub.1.1', 'sub.1.2'])
def test_GET_prefix_delimiter_json(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for first in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s' % first,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
for second in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s.%s' % (first, second),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?delimiter=.&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual([n.get('name', 's:' + n.get('subdir', 'error'))
for n in json.loads(resp.body)], ['s:sub.'])
req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(
[n.get('name', 's:' + n.get('subdir', 'error'))
for n in json.loads(resp.body)],
['sub.0', 's:sub.0.', 'sub.1', 's:sub.1.', 'sub.2', 's:sub.2.'])
req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(
[n.get('name', 's:' + n.get('subdir', 'error'))
for n in json.loads(resp.body)],
['sub.1.0', 'sub.1.1', 'sub.1.2'])
def test_GET_prefix_delimiter_xml(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for first in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s' % first,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
for second in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s.%s' % (first, second),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a?delimiter=.&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
listing = []
for node1 in dom.firstChild.childNodes:
if node1.nodeName == 'subdir':
listing.append('s:' + node1.attributes['name'].value)
elif node1.nodeName == 'container':
for node2 in node1.childNodes:
if node2.nodeName == 'name':
listing.append(node2.firstChild.nodeValue)
self.assertEqual(listing, ['s:sub.'])
req = Request.blank(
'/sda1/p/a?prefix=sub.&delimiter=.&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
listing = []
for node1 in dom.firstChild.childNodes:
if node1.nodeName == 'subdir':
listing.append('s:' + node1.attributes['name'].value)
elif node1.nodeName == 'container':
for node2 in node1.childNodes:
if node2.nodeName == 'name':
listing.append(node2.firstChild.nodeValue)
self.assertEqual(
listing,
['sub.0', 's:sub.0.', 'sub.1', 's:sub.1.', 'sub.2', 's:sub.2.'])
req = Request.blank(
'/sda1/p/a?prefix=sub.1.&delimiter=.&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
listing = []
for node1 in dom.firstChild.childNodes:
if node1.nodeName == 'subdir':
listing.append('s:' + node1.attributes['name'].value)
elif node1.nodeName == 'container':
for node2 in node1.childNodes:
if node2.nodeName == 'name':
listing.append(node2.firstChild.nodeValue)
self.assertEqual(listing, ['sub.1.0', 'sub.1.1', 'sub.1.2'])
def test_GET_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_through_call(self):
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '404 ')
def test_through_call_invalid_path(self):
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/bob',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '400 ')
def test_through_call_invalid_path_utf8(self):
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '\x00',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '412 ')
def test_invalid_method_doesnt_exist(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'method_doesnt_exist',
'PATH_INFO': '/sda1/p/a'},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_invalid_method_is_not_public(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': '__init__',
'PATH_INFO': '/sda1/p/a'},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_params_format(self):
Request.blank('/sda1/p/a',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.controller)
for format in ('xml', 'json'):
req = Request.blank('/sda1/p/a?format=%s' % format,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
def test_params_utf8(self):
# Bad UTF8 sequence, all parameters should cause 400 error
for param in ('delimiter', 'limit', 'marker', 'prefix', 'end_marker',
'format'):
req = Request.blank('/sda1/p/a?%s=\xce' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400,
"%d on param %s" % (resp.status_int, param))
# Good UTF8 sequence for delimiter, too long (1 byte delimiters only)
req = Request.blank('/sda1/p/a?delimiter=\xce\xa9',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412,
"%d on param delimiter" % (resp.status_int))
Request.blank('/sda1/p/a',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.controller)
# Good UTF8 sequence, ignored for limit, doesn't affect other queries
for param in ('limit', 'marker', 'prefix', 'end_marker', 'format'):
req = Request.blank('/sda1/p/a?%s=\xce\xa9' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204,
"%d on param %s" % (resp.status_int, param))
def test_PUT_auto_create(self):
headers = {'x-put-timestamp': normalize_timestamp(1),
'x-delete-timestamp': normalize_timestamp(0),
'x-object-count': '0',
'x-bytes-used': '0'}
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
req = Request.blank('/sda1/p/.a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/.c',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_content_type_on_HEAD(self):
Request.blank('/sda1/p/a',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.controller)
env = {'REQUEST_METHOD': 'HEAD'}
req = Request.blank('/sda1/p/a?format=xml', environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/xml')
req = Request.blank('/sda1/p/a?format=json', environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a', environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a', headers={'Accept': 'application/json'}, environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a', headers={'Accept': 'application/xml'}, environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/xml')
self.assertEqual(resp.charset, 'utf-8')
def test_serv_reserv(self):
# Test replication_server flag was set from configuration file.
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.assertEqual(AccountController(conf).replication_server, None)
for val in [True, '1', 'True', 'true']:
conf['replication_server'] = val
self.assertTrue(AccountController(conf).replication_server)
for val in [False, 0, '0', 'False', 'false', 'test_string']:
conf['replication_server'] = val
self.assertFalse(AccountController(conf).replication_server)
def test_list_allowed_methods(self):
# Test list of allowed_methods
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
repl_methods = ['REPLICATE']
for method_name in obj_methods:
method = getattr(self.controller, method_name)
self.assertFalse(hasattr(method, 'replication'))
for method_name in repl_methods:
method = getattr(self.controller, method_name)
self.assertEqual(method.replication, True)
def test_correct_allowed_method(self):
        # Test that an allowed (public, non-replication) method is dispatched
        # correctly through swift.account.server.AccountController.__call__
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = AccountController(
{'devices': self.testdir,
'mount_check': 'false',
'replication_server': 'false'})
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
method_res = mock.MagicMock()
mock_method = public(lambda x: mock.MagicMock(return_value=method_res))
with mock.patch.object(self.controller, method,
new=mock_method):
mock_method.replication = False
response = self.controller(env, start_response)
self.assertEqual(response, method_res)
def test_not_allowed_method(self):
        # Test that a replication-only method is rejected when the server is
        # not configured as a replication server, via
        # swift.account.server.AccountController.__call__
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = AccountController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'})
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
answer = ['<html><h1>Method Not Allowed</h1><p>The method is not '
'allowed for this resource.</p></html>']
mock_method = replication(public(lambda x: mock.MagicMock()))
with mock.patch.object(self.controller, method,
new=mock_method):
mock_method.replication = True
response = self.controller.__call__(env, start_response)
self.assertEqual(response, answer)
def test_call_incorrect_replication_method(self):
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = AccountController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'true'})
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST', 'OPTIONS']
for method in obj_methods:
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
self.controller(env, start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test__call__raise_timeout(self):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.logger = debug_logger('test')
self.account_controller = AccountController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false', 'log_requests': 'false'},
logger=self.logger)
def start_response(*args):
# Sends args to outbuf
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
@public
def mock_put_method(*args, **kwargs):
raise Exception()
with mock.patch.object(self.account_controller, method,
new=mock_put_method):
response = self.account_controller.__call__(env, start_response)
self.assertTrue(response[0].startswith(
'Traceback (most recent call last):'))
self.assertEqual(self.logger.get_lines_for_level('error'), [
'ERROR __call__ error with %(method)s %(path)s : ' % {
'method': 'PUT', 'path': '/sda1/p/a/c'},
])
self.assertEqual(self.logger.get_lines_for_level('info'), [])
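    # Illustrative sketch (not part of the original suite): the __call__ tests
    # above rebuild the same WSGI environ dict by hand each time.  A helper
    # along these lines could produce it; the defaults simply mirror the
    # literal dicts used in those tests.
    def _make_wsgi_environ(self, method, path, body=None, errors=None):
        return {'REQUEST_METHOD': method,
                'SCRIPT_NAME': '',
                'PATH_INFO': path,
                'SERVER_NAME': '127.0.0.1',
                'SERVER_PORT': '8080',
                'SERVER_PROTOCOL': 'HTTP/1.0',
                'CONTENT_LENGTH': '0',
                'wsgi.version': (1, 0),
                'wsgi.url_scheme': 'http',
                'wsgi.input': body if body is not None else BytesIO(),
                'wsgi.errors': errors if errors is not None else StringIO(),
                'wsgi.multithread': False,
                'wsgi.multiprocess': False,
                'wsgi.run_once': False}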
def test_GET_log_requests_true(self):
self.controller.logger = FakeLogger()
self.controller.log_requests = True
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue(self.controller.logger.log_dict['info'])
def test_GET_log_requests_false(self):
self.controller.logger = FakeLogger()
self.controller.log_requests = False
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertFalse(self.controller.logger.log_dict['info'])
def test_log_line_format(self):
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
self.controller.logger = FakeLogger()
with mock.patch(
'time.gmtime', mock.MagicMock(side_effect=[gmtime(10001.0)])):
with mock.patch(
'time.time',
mock.MagicMock(side_effect=[10000.0, 10001.0, 10002.0])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=1234)):
req.get_response(self.controller)
self.assertEqual(
self.controller.logger.log_dict['info'],
[(('1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD /sda1/p/a" 404 '
'- "-" "-" "-" 2.0000 "-" 1234 -',), {})])
def test_policy_stats_with_legacy(self):
ts = itertools.count()
# create the account
req = Request.blank('/sda1/p/a', method='PUT', headers={
'X-Timestamp': normalize_timestamp(next(ts))})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity
# add a container
req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
'X-Put-Timestamp': normalize_timestamp(next(ts)),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '4',
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# read back rollup
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
self.assertEqual(resp.headers['X-Account-Object-Count'], '2')
self.assertEqual(resp.headers['X-Account-Bytes-Used'], '4')
self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Object-Count' %
POLICIES[0].name], '2')
self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' %
POLICIES[0].name], '4')
self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Container-Count' %
POLICIES[0].name], '1')
def test_policy_stats_non_default(self):
ts = itertools.count()
# create the account
req = Request.blank('/sda1/p/a', method='PUT', headers={
'X-Timestamp': normalize_timestamp(next(ts))})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity
# add a container
non_default_policies = [p for p in POLICIES if not p.is_default]
policy = random.choice(non_default_policies)
req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
'X-Put-Timestamp': normalize_timestamp(next(ts)),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '4',
'X-Backend-Storage-Policy-Index': policy.idx,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# read back rollup
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
self.assertEqual(resp.headers['X-Account-Object-Count'], '2')
self.assertEqual(resp.headers['X-Account-Bytes-Used'], '4')
self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Object-Count' %
policy.name], '2')
self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' %
policy.name], '4')
self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Container-Count' %
policy.name], '1')
def test_empty_policy_stats(self):
ts = itertools.count()
# create the account
req = Request.blank('/sda1/p/a', method='PUT', headers={
'X-Timestamp': normalize_timestamp(next(ts))})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
for key in resp.headers:
self.assertTrue('storage-policy' not in key.lower())
def test_empty_except_for_used_policies(self):
ts = itertools.count()
# create the account
req = Request.blank('/sda1/p/a', method='PUT', headers={
'X-Timestamp': normalize_timestamp(next(ts))})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity
# starts empty
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
for key in resp.headers:
self.assertTrue('storage-policy' not in key.lower())
# add a container
policy = random.choice(POLICIES)
req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
'X-Put-Timestamp': normalize_timestamp(next(ts)),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '4',
'X-Backend-Storage-Policy-Index': policy.idx,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
        # only the policy of the created container should appear in the headers
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
for key in resp.headers:
if 'storage-policy' in key.lower():
self.assertTrue(policy.name.lower() in key.lower())
def test_multiple_policies_in_use(self):
ts = itertools.count()
# create the account
req = Request.blank('/sda1/p/a', method='PUT', headers={
'X-Timestamp': normalize_timestamp(next(ts))})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity
# add some containers
for policy in POLICIES:
count = policy.idx * 100 # good as any integer
container_path = '/sda1/p/a/c_%s' % policy.name
req = Request.blank(
container_path, method='PUT', headers={
'X-Put-Timestamp': normalize_timestamp(next(ts)),
'X-Delete-Timestamp': '0',
'X-Object-Count': count,
'X-Bytes-Used': count,
'X-Backend-Storage-Policy-Index': policy.idx,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
# check container counts in roll up headers
total_object_count = 0
total_bytes_used = 0
for key in resp.headers:
if 'storage-policy' not in key.lower():
continue
for policy in POLICIES:
if policy.name.lower() not in key.lower():
continue
if key.lower().endswith('object-count'):
object_count = int(resp.headers[key])
self.assertEqual(policy.idx * 100, object_count)
total_object_count += object_count
if key.lower().endswith('bytes-used'):
bytes_used = int(resp.headers[key])
self.assertEqual(policy.idx * 100, bytes_used)
total_bytes_used += bytes_used
expected_total_count = sum([p.idx * 100 for p in POLICIES])
self.assertEqual(expected_total_count, total_object_count)
self.assertEqual(expected_total_count, total_bytes_used)
@patch_policies([StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True),
StoragePolicy(2, 'two', False),
StoragePolicy(3, 'three', False)])
class TestNonLegacyDefaultStoragePolicy(TestAccountController):
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
weiawe/django | django/utils/datastructures.py | 55 | 9177 | import copy
from collections import OrderedDict
from django.utils import six
class OrderedSet(object):
"""
A set which keeps the ordering of the inserted items.
Currently backs onto OrderedDict.
"""
def __init__(self, iterable=None):
self.dict = OrderedDict(((x, None) for x in iterable) if iterable else [])
def add(self, item):
self.dict[item] = None
def remove(self, item):
del self.dict[item]
def discard(self, item):
try:
self.remove(item)
except KeyError:
pass
def __iter__(self):
return iter(self.dict.keys())
def __contains__(self, item):
return item in self.dict
def __bool__(self):
return bool(self.dict)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.getlist('doesnotexist')
[]
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError(repr(key))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = {k: self.getlist(k) for k in self}
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key, default=None):
"""
Returns the list of values for the passed key. If key doesn't exist,
then a default value is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
if default is None:
return []
return default
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
# Do not return default here because __setitem__() may store
# another value -- QueryDict.__setitem__() does. Look it up.
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
# Do not return default_list here because setlist() may store
# another value -- QueryDict.setlist() does. Look it up.
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key).append(value)
def _iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self:
yield key, self[key]
def _iterlists(self):
"""Yields (key, list) pairs."""
return six.iteritems(super(MultiValueDict, self))
def _itervalues(self):
"""Yield the last value on every key list."""
for key in self:
yield self[key]
if six.PY3:
items = _iteritems
lists = _iterlists
values = _itervalues
else:
iteritems = _iteritems
iterlists = _iterlists
itervalues = _itervalues
def items(self):
return list(self.iteritems())
def lists(self):
return list(self.iterlists())
def values(self):
return list(self.itervalues())
def copy(self):
"""Returns a shallow copy of this object."""
return copy.copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in six.iteritems(kwargs):
self.setlistdefault(key).append(value)
def dict(self):
"""
Returns current object as a dict with singular values.
"""
return {key: self[key] for key in self}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
    def complain(self, *args, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
    Wraps accesses to a dictionary so that values looked up through keys
    starting with the specified prefix are passed through a function before
    being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
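# Illustrative usage sketch (not part of the original module), showing how the
# prefix routes a lookup through the supplied function:
#
#     d = DictWrapper({'name': 'value'}, lambda v: '"%s"' % v, 'quoted_')
#     d['name']         # -> 'value'     (raw value, no prefix)
#     d['quoted_name']  # -> '"value"'   (prefix stripped, value passed to func)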
| bsd-3-clause |
Koonkie/MetaPathways_Python_Koonkie.3.0 | libs/python_scripts/MetaPathways_create_biom.py | 2 | 11847 | #!/usr/bin/python
# File created on Nov 27 Jan 2012
from __future__ import division
__author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
try:
from os import makedirs, sys, remove, rename
from sys import path
import os, re, traceback
from optparse import OptionParser, OptionGroup
from glob import glob
from libs.python_modules.utils.metapathways_utils import parse_command_line_parameters, fprintf, printf, eprintf
from libs.python_modules.utils.metapathways_utils import strip_taxonomy, ShortenORFId, ShortenContigId, ContigID, ShortenrRNAId
from libs.python_modules.utils.sysutil import getstatusoutput
from libs.python_modules.utils.utils import doesFileExist, createDummyFile
from libs.python_modules.utils.errorcodes import *
from libs.python_modules.utils.sysutil import pathDelim
from libs.python_modules.parsers.blast import BlastOutputTsvParser, getParsedBlastFileNames, getrRNAStatFileNames
except:
print """ Could not load some user defined module functions"""
print """ Make sure your typed 'source MetaPathwaysrc'"""
print """ """
sys.exit(3)
usage= sys.argv[0] + """ dbname2 -b parsed_blastout_for_database2 -w weight_for_database2 ] [ --rRNA_16S 16SrRNA-stats-table ] [ --tRNA tRNA-stats-table ] [ --compact_output ]"""
parser = None
PATHDELIM = str(pathDelim())
def createParser():
global parser
epilog = """Reads the parsed BLAST/LAST files to create files that provides gene count
and then convert to biom format """
epilog = re.sub(r'\s+',' ', epilog)
parser = OptionParser(usage = usage, epilog = epilog)
parser.add_option("-a", "--algorithm", dest="algorithm", default="BLAST", help="algorithm BLAST or LAST [default BLAST]" )
parser.add_option("-B", "--blastdir", dest="blastdir", default=None,
help='the blast dir where all the BLAST outputs are located')
parser.add_option("-O", "--outputdir", dest="outputdir", default=None,
help='the output for the biom files')
parser.add_option("-s", "--samplename", dest="sample_name", default=None,
help='the sample name')
parser.add_option( "-R", "--rRNAdir", dest="rRNAdir", default=None,
help='the folder where the rRNA stats are [OPTIONAL]')
parser.add_option( "--readcounts", dest="readcounts", default=None,
help='the file with the orfwise read counts')
parser.add_option( "--readrpkgs", dest="readrpkgs", default=None,
                      help='the file with the orfwise RPKG read counts')
cutoffs_group = OptionGroup(parser, 'Cuttoff Related Options')
cutoffs_group.add_option("--min_score", dest="min_score", type='float', default=20,
help='the minimum bit score cutoff [default = 20 ] ')
cutoffs_group.add_option("--max_evalue", dest="max_evalue", type='float', default=1e-6,
help='the maximum E-value cutoff [ default = 1e-6 ] ')
cutoffs_group.add_option("--min_length", dest="min_length", type='float', default=30,
help='the minimum length of query cutoff [default = 30 ] ')
cutoffs_group.add_option("--min_identity", dest="min_identity", type='float', default=20,
                             help='the minimum identity of query cutoff [default = 20 ] ')
cutoffs_group.add_option("--limit", dest="limit", type='float', default=1,
                             help='max number of hits per query cutoff [default = 1 ] ')
parser.add_option_group(cutoffs_group)
def check_arguments(opts, args):
if opts.blastdir == None:
eprintf("The blast_results folder must be specified\n")
return False
if opts.sample_name == None:
eprintf("There should be at least one sample name\n")
return False
return True
def write_16S_tRNA_gene_info(rRNA_dictionary, outputgff_file, tag):
fields = [ 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame' ]
for rRNA in rRNA_dictionary:
output_line= rRNA_dictionary[rRNA]['id']
for field in fields:
output_line += "\t"+ str(rRNA_dictionary[rRNA][field])
attributes = "ID="+rRNA_dictionary[rRNA]['seqname'] + tag
attributes += ";" + "locus_tag="+rRNA_dictionary[rRNA]['seqname'] + tag
attributes += ";" + "orf_length=" + str(rRNA_dictionary[rRNA]['orf_length'])
attributes += ";" + "contig_length=" + str(rRNA_dictionary[rRNA]['contig_length'])
attributes += ";" + "ec="
attributes += ";" + "product="+rRNA_dictionary[rRNA]['product']
output_line += '\t' + attributes
fprintf(outputgff_file, "%s\n", output_line);
def process_rRNA_16S_stats(dbname, rRNA_16S_file, orf_read_rpkgs, opts, shortenorfid=False):
print "Processing rRNA database : ", dbname
counter_rRNA={}
if not doesFileExist(rRNA_16S_file):
return
try:
taxonomy_file = open(rRNA_16S_file, 'r')
except IOError:
eprintf("Cannot read file %s!\n", rRNA_16S_file)
exit_process()
tax_lines = taxonomy_file.readlines()
similarity_pattern = re.compile("similarity")
evalue_pattern = re.compile("evalue")
bitscore_pattern = re.compile("bitscore")
taxonomy_pattern = re.compile("taxonomy")
headerScanned = False
seencounter = {}
for line in tax_lines:
if headerScanned == False:
if similarity_pattern.search(line) and evalue_pattern.search(line) and bitscore_pattern.search(line) and taxonomy_pattern.search(line):
headerScanned = True
continue
fields = [ x.strip() for x in line.split('\t') ]
if len(fields) >=6:
if not fields[0] in seencounter:
seencounter[fields[0]]=0
else:
seencounter[fields[0]] +=1
_name = fields[0] + "_" + str(seencounter[fields[0]] ) + "_rRNA"
if not fields[6] in counter_rRNA:
counter_rRNA[fields[6]] = 0.0
name = ShortenrRNAId(_name)
if name in orf_read_rpkgs:
counter_rRNA[fields[6]] += orf_read_rpkgs[name]
else:
counter_rRNA[fields[6]] += 0
taxonomy_file.close()
with open(opts.outputdir + PATHDELIM + opts.sample_name + "." + dbname + ".read_rpkgs.txt", 'w') as fout:
fprintf(fout, "# Gene\tCounts\n");
for name in counter_rRNA:
fprintf(fout, "%s\t%0.2f\n", name, counter_rRNA[name])
return len(counter_rRNA)
def get_sequence_number(line):
seqnamePATT = re.compile(r'[\S]+_(\d+)$')
result = seqnamePATT.search(line.strip())
if result:
return result.group(1)
return line
def getFunctionName(dbname, data):
COG_NAME = re.compile(r'^cog', re.IGNORECASE)
UNIPROT_NAME = re.compile(r'^uniprot.*sprot', re.IGNORECASE)
COG_PATT = re.compile(r'(COG\d\d\d\d)')
UNIPROT_PATT = re.compile("[ts][rp]\|([A-Z0-9]+)\|")
if COG_NAME.search(dbname):
res = COG_PATT.search(data['product'])
if res:
return res.group(1)
else:
return data['target']
if UNIPROT_NAME.search(dbname):
res = UNIPROT_PATT.search(data['target'])
if res:
return res.group(1)
else:
return data['target']
return data['target']
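# Illustrative examples (reconstructed from the regexes above; the strings are
# made up for illustration): for a database whose name starts with 'cog', the
# COG id is pulled from the product field, e.g. 'ABC transporter COG1131' ->
# 'COG1131'; for a uniprot/sprot database the accession is pulled from the
# target id, e.g. 'sp|Q9XYZ1|SOME_PROT' -> 'Q9XYZ1'; anything else falls back
# to the raw target id.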
def isWithinCutoffs(data, cutoffs):
if data['q_length'] < cutoffs.min_length:
return False
if data['bitscore'] < cutoffs.min_score:
return False
if data['expect'] > cutoffs.max_evalue:
return False
if data['identity'] < cutoffs.min_identity:
return False
return True
#read counts
def read_orf_read_counts(orf_read_counts, readcounts_file):
if readcounts_file==None:
return
comment_PATT = re.compile(r'^#')
with open(readcounts_file, 'r') as finput:
for line in finput:
if not comment_PATT.search(line):
fields = [ x.strip() for x in line.split('\t') ]
orf_read_counts[fields[0]] = float(fields[1])
print orf_read_counts[fields[0]], float(fields[1])
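# Based on the parsing above, the read-counts file is a tab-separated table
# whose comment lines start with '#': column 0 is an ORF name and column 1 a
# numeric count. An illustrative excerpt:
#
#   # ORF\tcount
#   contig_1_1\t12.0
#   contig_1_2\t3.0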
# aggregate the read counts per functional annotation hit
def process_parsed_blastoutput(dbname, blastoutput, opts, orf_read_counts):
blastparser = BlastOutputTsvParser(dbname, blastoutput, shortenorfid=False)
hit_counts={}
for data in blastparser:
#if count%10000==0:
if isWithinCutoffs(data, opts) :
target = getFunctionName(dbname, data)
if not target in hit_counts:
hit_counts[target] = 0
if data['query'] in orf_read_counts:
hit_counts[target] += orf_read_counts[data['query']]
else:
#print 'query', data['query']
hit_counts[target] += 1
#print data
#for name in hit_counts:
# print name, hit_counts[name]
filename = opts.outputdir + PATHDELIM + opts.sample_name + "." + dbname
filename_txt = filename + ".read_counts.txt"
filename_biom = filename + ".read_counts.biom"
with open(filename_txt, 'w') as fout:
fprintf(fout, "# Gene\tCounts\n");
for name in hit_counts:
fprintf(fout, "%s\t%d\n", name, hit_counts[name])
runBIOMCommand(filename_txt, filename_biom, biomExec="biom")
return len(hit_counts)
def runBIOMCommand(infile, outfile, biomExec="biom"):
commands = [biomExec, " convert", "-i", infile, "-o", outfile, "--table-type=\"Gene table\"", "--to-hdf5"]
result = getstatusoutput(' '.join(commands))
return result[0]
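# For reference, the command assembled above is equivalent to running, with
# illustrative file names:
#
#   biom convert -i sample.COG.read_counts.txt -o sample.COG.read_counts.biom --table-type="Gene table" --to-hdf5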
#def getBlastFileNames(opts) :
# the main function
def main(argv, errorlogger =None, runstatslogger = None):
global parser
(opts, args) = parser.parse_args(argv)
if not check_arguments(opts, args):
print usage
sys.exit(0)
# read the ORFwise counts
orf_read_counts={}
if os.path.exists(opts.readcounts):
read_orf_read_counts(orf_read_counts, opts.readcounts)
# Functional databases
database_names, input_blastouts = getParsedBlastFileNames(opts.blastdir, opts.sample_name, opts.algorithm)
priority = 6000
count_annotations = {}
for dbname, blastoutput in zip(database_names, input_blastouts):
count = process_parsed_blastoutput(dbname, blastoutput, opts, orf_read_counts)
if runstatslogger!=None:
runstatslogger.write("%s\tProtein Annotations from %s\t%s\n" %( str(priority), dbname, str(count)))
# rRNA databases
orf_read_rpkgs={}
if os.path.exists(opts.readrpkgs):
read_orf_read_counts(orf_read_rpkgs, opts.readrpkgs)
rRNA_16S_dictionary = {}
database_names, input_blastouts = getrRNAStatFileNames(opts.rRNAdir, opts.sample_name, opts.algorithm)
for dbname, rRNAStatsFile in zip(database_names, input_blastouts):
process_rRNA_16S_stats(dbname, rRNAStatsFile, orf_read_rpkgs, opts, shortenorfid=False)
createDummyFile(opts.outputdir + PATHDELIM + opts.sample_name + ".dummy.txt")
#create the annotations from the results
def MetaPathways_create_biom(argv, errorlogger = None, runstatslogger = None):
createParser()
errorlogger.write("#STEP\tCREATE_BIOM\n")
try:
main(argv, errorlogger = errorlogger, runstatslogger = runstatslogger)
except:
insert_error(11)
return (0,'')
# the main function of metapaths
if __name__ == "__main__":
createParser()
try:
main(sys.argv[1:])
except:
insert_error(11)
| mit |
JarbasAI/JarbasAI | mycroft/client/enclosure/arduino.py | 1 | 1707 | # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'jdorleans'
class EnclosureArduino:
"""
Listens to enclosure commands for Mycroft's Arduino.
Performs the associated command on Arduino by writing on the Serial port.
"""
def __init__(self, ws, writer):
self.ws = ws
self.writer = writer
self.__init_events()
def __init_events(self):
self.ws.on('enclosure.system.reset', self.reset)
self.ws.on('enclosure.system.mute', self.mute)
self.ws.on('enclosure.system.unmute', self.unmute)
self.ws.on('enclosure.system.blink', self.blink)
def reset(self, event=None):
self.writer.write("system.reset")
def mute(self, event=None):
self.writer.write("system.mute")
def unmute(self, event=None):
self.writer.write("system.unmute")
def blink(self, event=None):
times = 1
if event and event.data:
times = event.data.get("times", times)
self.writer.write("system.blink=" + str(times))
| gpl-3.0 |
apache/beam | sdks/python/apache_beam/io/external/xlang_snowflakeio_it_test.py | 5 | 9103 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Integration test for cross-language snowflake io operations.
Example run:
python setup.py nosetests --tests=apache_beam.io.external.snowflake_test \
--test-pipeline-options="
--server_name=<SNOWFLAKE_SERVER_NAME>
--username=<SNOWFLAKE_USERNAME>
--password=<SNOWFLAKE_PASSWORD>
--private_key_path=<PATH_TO_PRIVATE_KEY>
--raw_private_key=<RAW_PRIVATE_KEY>
--private_key_passphrase=<PASSWORD_TO_PRIVATE_KEY>
--o_auth_token=<TOKEN>
--staging_bucket_name=<GCP_BUCKET_PATH>
--storage_integration_name=<SNOWFLAKE_STORAGE_INTEGRATION_NAME>
--database=<DATABASE>
--schema=<SCHEMA>
--role=<ROLE>
--warehouse=<WAREHOUSE>
--table=<TABLE_NAME>
--runner=FlinkRunner"
"""
# pytype: skip-file
import argparse
import binascii
import logging
import unittest
from typing import ByteString
from typing import NamedTuple
import apache_beam as beam
from apache_beam import coders
from apache_beam.io.snowflake import CreateDisposition
from apache_beam.io.snowflake import ReadFromSnowflake
from apache_beam.io.snowflake import WriteDisposition
from apache_beam.io.snowflake import WriteToSnowflake
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from apache_beam.io.gcp.gcsfilesystem import GCSFileSystem
except ImportError:
GCSFileSystem = None
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
SCHEMA_STRING = """
{"schema":[
{"dataType":{"type":"integer","precision":38,"scale":0},"name":"number_column","nullable":false},
{"dataType":{"type":"boolean"},"name":"boolean_column","nullable":false},
{"dataType":{"type":"binary","size":100},"name":"bytes_column","nullable":true}
]}
"""
TestRow = NamedTuple(
'TestRow',
[
('number_column', int),
('boolean_column', bool),
('bytes_column', ByteString),
])
coders.registry.register_coder(TestRow, coders.RowCoder)
NUM_RECORDS = 100
@unittest.skipIf(GCSFileSystem is None, 'GCP dependencies are not installed')
@unittest.skipIf(
TestPipeline().get_option('server_name') is None,
'Snowflake IT test requires external configuration to be run.')
class SnowflakeTest(unittest.TestCase):
def test_snowflake_write_read(self):
self.run_write()
self.run_read()
def run_write(self):
def user_data_mapper(test_row):
return [
str(test_row.number_column).encode('utf-8'),
str(test_row.boolean_column).encode('utf-8'),
binascii.hexlify(test_row.bytes_column),
]
with TestPipeline(options=PipelineOptions(self.pipeline_args)) as p:
p.not_use_test_runner_api = True
_ = (
p
| 'Impulse' >> beam.Impulse()
| 'Generate' >> beam.FlatMap(lambda x: range(NUM_RECORDS)) # pylint: disable=range-builtin-not-iterating
| 'Map to TestRow' >> beam.Map(
lambda num: TestRow(
num, num % 2 == 0, b"test" + str(num).encode()))
| WriteToSnowflake(
server_name=self.server_name,
username=self.username,
password=self.password,
o_auth_token=self.o_auth_token,
private_key_path=self.private_key_path,
raw_private_key=self.raw_private_key,
private_key_passphrase=self.private_key_passphrase,
schema=self.schema,
database=self.database,
role=self.role,
warehouse=self.warehouse,
staging_bucket_name=self.staging_bucket_name,
storage_integration_name=self.storage_integration_name,
create_disposition=CreateDisposition.CREATE_IF_NEEDED,
write_disposition=WriteDisposition.TRUNCATE,
table_schema=SCHEMA_STRING,
user_data_mapper=user_data_mapper,
table=self.table,
query=None,
expansion_service=self.expansion_service,
))
def run_read(self):
def csv_mapper(bytes_array):
return TestRow(
int(bytes_array[0]),
bytes_array[1] == b'true',
binascii.unhexlify(bytes_array[2]))
with TestPipeline(options=PipelineOptions(self.pipeline_args)) as p:
result = (
p
| ReadFromSnowflake(
server_name=self.server_name,
username=self.username,
password=self.password,
o_auth_token=self.o_auth_token,
private_key_path=self.private_key_path,
raw_private_key=self.raw_private_key,
private_key_passphrase=self.private_key_passphrase,
schema=self.schema,
database=self.database,
role=self.role,
warehouse=self.warehouse,
staging_bucket_name=self.staging_bucket_name,
storage_integration_name=self.storage_integration_name,
csv_mapper=csv_mapper,
table=self.table,
query=None,
expansion_service=self.expansion_service,
).with_output_types(TestRow))
assert_that(
result,
equal_to([
TestRow(i, i % 2 == 0, b'test' + str(i).encode())
for i in range(NUM_RECORDS)
]))
@classmethod
def tearDownClass(cls):
GCSFileSystem(pipeline_options=PipelineOptions()) \
.delete([cls.staging_bucket_name])
@classmethod
def setUpClass(cls):
parser = argparse.ArgumentParser()
parser.add_argument(
'--server_name',
required=True,
help=(
'Snowflake server name of the form '
'https://<SNOWFLAKE_ACCOUNT_NAME>.snowflakecomputing.com'),
)
parser.add_argument(
'--username',
help='Snowflake username',
)
parser.add_argument(
'--password',
help='Snowflake password',
)
parser.add_argument(
'--private_key_path',
help='Path to private key',
)
parser.add_argument(
'--raw_private_key',
help='Raw private key',
)
parser.add_argument(
'--private_key_passphrase',
help='Password to private key',
)
parser.add_argument(
'--o_auth_token',
help='OAuth token',
)
parser.add_argument(
'--staging_bucket_name',
required=True,
help='GCP staging bucket name (must end with a trailing slash "/")',
)
parser.add_argument(
'--storage_integration_name',
required=True,
help='Snowflake integration name',
)
parser.add_argument(
'--database',
required=True,
help='Snowflake database name',
)
parser.add_argument(
'--schema',
required=True,
help='Snowflake schema name',
)
parser.add_argument(
'--table',
required=True,
help='Snowflake table name',
)
parser.add_argument(
'--role',
help='Snowflake role',
)
parser.add_argument(
'--warehouse',
help='Snowflake warehouse name',
)
parser.add_argument(
'--expansion_service',
help='Url to externally launched expansion service.',
)
pipeline = TestPipeline()
argv = pipeline.get_full_options_as_args()
known_args, cls.pipeline_args = parser.parse_known_args(argv)
cls.server_name = known_args.server_name
cls.database = known_args.database
cls.schema = known_args.schema
cls.table = known_args.table
cls.username = known_args.username
cls.password = known_args.password
cls.private_key_path = known_args.private_key_path
cls.raw_private_key = known_args.raw_private_key
cls.private_key_passphrase = known_args.private_key_passphrase
cls.o_auth_token = known_args.o_auth_token
cls.staging_bucket_name = known_args.staging_bucket_name
cls.storage_integration_name = known_args.storage_integration_name
cls.role = known_args.role
cls.warehouse = known_args.warehouse
cls.expansion_service = known_args.expansion_service
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 |
akash1808/nova_test_latest | nova/tests/unit/scheduler/test_chance_scheduler.py | 69 | 3776 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Chance Scheduler.
"""
import random
from mox3 import mox
from nova import context
from nova import exception
from nova.scheduler import chance
from nova.tests.unit.scheduler import test_scheduler
class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Chance Scheduler."""
driver_cls = chance.ChanceScheduler
def test_filter_hosts_avoid(self):
"""Test to make sure _filter_hosts() filters original hosts if
avoid_original_host is True.
"""
hosts = ['host1', 'host2', 'host3']
request_spec = dict(instance_properties=dict(host='host2'))
filter_properties = {'ignore_hosts': ['host2']}
filtered = self.driver._filter_hosts(request_spec, hosts,
filter_properties=filter_properties)
self.assertEqual(filtered, ['host1', 'host3'])
def test_filter_hosts_no_avoid(self):
"""Test to make sure _filter_hosts() does not filter original
hosts if avoid_original_host is False.
"""
hosts = ['host1', 'host2', 'host3']
request_spec = dict(instance_properties=dict(host='host2'))
filter_properties = {'ignore_hosts': []}
filtered = self.driver._filter_hosts(request_spec, hosts,
filter_properties=filter_properties)
self.assertEqual(filtered, hosts)
def test_select_destinations(self):
ctxt = context.RequestContext('fake', 'fake', False)
ctxt_elevated = 'fake-context-elevated'
request_spec = {'num_instances': 2}
self.mox.StubOutWithMock(ctxt, 'elevated')
self.mox.StubOutWithMock(self.driver, 'hosts_up')
self.mox.StubOutWithMock(random, 'choice')
hosts_full = ['host1', 'host2', 'host3', 'host4']
ctxt.elevated().AndReturn(ctxt_elevated)
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
random.choice(hosts_full).AndReturn('host3')
ctxt.elevated().AndReturn(ctxt_elevated)
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
random.choice(hosts_full).AndReturn('host2')
self.mox.ReplayAll()
dests = self.driver.select_destinations(ctxt, request_spec, {})
self.assertEqual(2, len(dests))
(host, node) = (dests[0]['host'], dests[0]['nodename'])
self.assertEqual('host3', host)
self.assertIsNone(node)
(host, node) = (dests[1]['host'], dests[1]['nodename'])
self.assertEqual('host2', host)
self.assertIsNone(node)
def test_select_destinations_no_valid_host(self):
def _return_no_host(*args, **kwargs):
return []
self.mox.StubOutWithMock(self.driver, 'hosts_up')
self.driver.hosts_up(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn([1, 2])
self.stubs.Set(self.driver, '_filter_hosts', _return_no_host)
self.mox.ReplayAll()
request_spec = {'num_instances': 1}
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations, self.context,
request_spec, {})
| apache-2.0 |
ladybug-analysis-tools/ladybug-core | ladybug/color.py | 1 | 21972 | # coding=utf-8
"""Ladybug color, colorsets and colorrange."""
from __future__ import division
try:
from collections.abc import Iterable # Python >= 3.3
except ImportError:
from collections import Iterable # Python 2
class Color(object):
"""Ladybug RGBA color.
Args:
r: red value 0-255, default: 0
g: green value 0-255, default: 0
b: blue value 0-255, default: 0
a: alpha value 0-255. Alpha defines the opacity as a number between 0 (fully
transparent) and 255 (fully opaque). Default 255.
Properties:
* r
* g
* b
* a
"""
__slots__ = ("_r", "_g", "_b", "_a")
def __init__(self, r=0, g=0, b=0, a=255):
"""Generate RGB Color.
"""
self.r = r
self.g = g
self.b = b
self.a = a
@classmethod
def from_dict(cls, data):
"""Create a color from a dictionary.
Args:
data: A python dictionary in the following format
.. code-block:: python
{
"r": 255,
"g": 0,
"b": 150,
"a": 255
}
"""
a = data['a'] if 'a' in data else 255
return cls(data['r'], data['g'], data['b'], a)
@property
def r(self):
"""Return R value."""
return self._r
@r.setter
def r(self, value):
assert 0 <= int(value) <= 255, "%d is out of range. " % value + \
"R value should be between 0-255"
self._r = int(value)
@property
def g(self):
"""Return G value."""
return self._g
@g.setter
def g(self, value):
assert 0 <= int(value) <= 255, "%d is out of range. " % value + \
"G value should be between 0-255"
self._g = int(value)
@property
def b(self):
"""Return B value."""
return self._b
@b.setter
def b(self, value):
assert 0 <= int(value) <= 255, "%d is out of range. " % value + \
"B value should be between 0-255"
self._b = int(value)
@property
def a(self):
"""Return A value."""
return self._a
@a.setter
def a(self, value):
assert 0 <= int(value) <= 255, "%d is out of range. " % value + \
"A value should be between 0-255"
self._a = int(value)
def duplicate(self):
"""Return a copy of the current color."""
return self.__copy__()
def to_dict(self):
"""Get color as a dictionary."""
return {
'r': self.r,
'g': self.g,
'b': self.b,
'a': self.a,
'type': 'Color'
}
def __copy__(self):
return self.__class__(self.r, self.g, self.b, self.a)
def __eq__(self, other):
if isinstance(other, Color):
return self.r == other.r and self.g == other.g and self.b == other.b and \
self.a == other.a
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.r, self.g, self.b, self.a))
def __len__(self):
return 4
def __getitem__(self, key):
return (self.r, self.g, self.b, self.a)[key]
def __iter__(self):
return iter((self.r, self.g, self.b, self.a))
def ToString(self):
"""Overwrite .NET ToString."""
return self.__repr__()
def __repr__(self):
"""Return RGB values."""
return "(R:%d, G:%d, B:%d, A:%d)" % (self._r, self._g, self._b, self._a)
# TODO: Add support for CMYK
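# A short usage sketch for Color (the values are arbitrary):
#
# c = Color(255, 0, 150)
# c.to_dict() -> {'r': 255, 'g': 0, 'b': 150, 'a': 255, 'type': 'Color'}
# tuple(c) -> (255, 0, 150, 255)
# c == Color.from_dict({'r': 255, 'g': 0, 'b': 150}) -> True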
class Colorset(object):
"""Ladybug Color-range repository.
A list of default Ladybug colorsets for color range:
* 0 - Original Ladybug
* 1 - Nuanced Ladybug
* 2 - Multi-colored Ladybug
* 3 - Ecotect
* 4 - View Study
* 5 - Shadow Study
* 6 - Glare Study
* 7 - Annual Comfort
* 8 - Thermal Comfort
* 9 - Peak Load Balance
* 10 - Heat Sensation
* 11 - Cold Sensation
* 12 - Benefit/Harm
* 13 - Harm
* 14 - Benefit
* 15 - Shade Benefit/Harm
* 16 - Shade Harm
* 17 - Shade Benefit
* 18 - Energy Balance
* 19 - Energy Balance with Storage
* 20 - THERM
* 21 - Cloud Cover
* 22 - Black to White
* 23 - Blue, Green, Red
* 24 - Multicolored 2
* 25 - Multicolored 3
* 26 - OpenStudio Palette
Usage:
.. code-block:: python
# initialize colorsets
cs = Colorset()
print(cs[0])
>> ((R:75, G:107, B:169, A:255), (R:115, G:147, B:202, A:255),
(R:170, G:200, B:247, A:255), (R:193, G:213, B:208, A:255),
(R:245, G:239, B:103, A:255), (R:252, G:230, B:74, A:255),
(R:239, G:156, B:21, A:255), (R:234, G:123, B:0, A:255),
(R:234, G:74, B:0, A:255), (R:234, G:38, B:0, A:255))
"""
# base color sets for which there are several variations
_multicolored = [(4, 25, 145), (7, 48, 224), (7, 88, 255), (1, 232, 255),
(97, 246, 156), (166, 249, 86), (254, 244, 1), (255, 121, 0),
(239, 39, 0), (138, 17, 0)]
_thermalcomfort = [(0, 136, 255), (200, 225, 255), (255, 255, 255),
(255, 230, 230), (255, 0, 0)]
_benefitharm = [(0, 191, 48), (255, 238, 184), (255, 0, 0)]
_shadebenefitharm = [(5, 48, 97), (33, 102, 172), (67, 147, 195), (146, 197, 222),
(209, 229, 240), (255, 255, 255), (253, 219, 199),
(244, 165, 130), (214, 96, 77), (178, 24, 43), (103, 0, 31)]
# dictionary of all color sets together
_colors = {
0: [(75, 107, 169), (115, 147, 202), (170, 200, 247), (193, 213, 208),
(245, 239, 103), (252, 230, 74), (239, 156, 21), (234, 123, 0),
(234, 74, 0), (234, 38, 0)],
1: [(49, 54, 149), (69, 117, 180), (116, 173, 209), (171, 217, 233),
(224, 243, 248), (255, 255, 191), (254, 224, 144), (253, 174, 97),
(244, 109, 67), (215, 48, 39), (165, 0, 38)],
2: _multicolored,
3: [(0, 0, 255), (53, 0, 202), (107, 0, 148), (160, 0, 95), (214, 0, 41),
(255, 12, 0), (255, 66, 0), (255, 119, 0), (255, 173, 0), (255, 226, 0),
(255, 255, 0)],
4: [(255, 20, 147), (240, 47, 145), (203, 117, 139), (160, 196, 133),
(132, 248, 129), (124, 253, 132), (96, 239, 160), (53, 217, 203),
(15, 198, 240), (0, 191, 255)],
5: [(55, 55, 55), (235, 235, 235)],
6: [(156, 217, 255), (255, 243, 77), (255, 115, 0), (255, 0, 0), (0, 0, 0)],
7: [(0, 0, 0), (110, 0, 153), (255, 0, 0), (255, 255, 102), (255, 255, 255)],
8: _thermalcomfort,
9: [(255, 251, 0), (255, 0, 0), (148, 24, 24), (135, 178, 224),
(255, 175, 46), (255, 242, 140), (204, 204, 204)],
10: _thermalcomfort[2:],
11: list(reversed(_thermalcomfort[:3])),
12: _benefitharm,
13: _benefitharm[1:],
14: list(reversed(_benefitharm[:2])),
15: _shadebenefitharm,
16: _shadebenefitharm[5:],
17: list(reversed(_shadebenefitharm[:6])),
18: list(reversed(_multicolored)),
19: list(reversed(_multicolored)) + [(128, 102, 64)],
20: [(0, 0, 0), (137, 0, 139), (218, 0, 218), (196, 0, 255), (0, 92, 255),
(0, 198, 252), (0, 244, 215), (0, 220, 101), (7, 193, 0), (115, 220, 0),
(249, 251, 0), (254, 178, 0), (253, 77, 0), (255, 15, 15),
(255, 135, 135), (255, 255, 255)],
21: [(0, 251, 255), (255, 255, 255), (217, 217, 217), (83, 114, 115)],
22: [(0, 0, 0), (255, 255, 255)],
23: [(0, 0, 255), (0, 255, 100), (255, 0, 0)],
24: [(0, 16, 120), (38, 70, 160), (5, 180, 222), (16, 180, 109),
(59, 183, 35), (143, 209, 19), (228, 215, 29), (246, 147, 17),
(243, 74, 0), (255, 0, 0)],
25: [(69, 92, 166), (66, 128, 167), (62, 176, 168), (78, 181, 137),
(120, 188, 59), (139, 184, 46), (197, 157, 54), (220, 144, 57),
(228, 100, 59), (233, 68, 60)],
26: [(230, 180, 60), (230, 215, 150), (165, 82, 0),
(128, 20, 20), (255, 128, 128), (64, 128, 128),
(128, 128, 128), (255, 128, 128), (128, 64, 0),
(64, 180, 255), (160, 150, 100), (120, 75, 190), (255, 255, 200),
(0, 128, 0)]
}
def __init__(self):
"""Initialize Color-sets."""
pass
@classmethod
def original(cls):
"""Original Ladybug colors."""
return tuple(Color(*color) for color in cls._colors[0])
@classmethod
def nuanced(cls):
"""Nuanced Ladybug colors."""
return tuple(Color(*color) for color in cls._colors[1])
@classmethod
def multi_colored(cls):
"""Multi-colored legend."""
return tuple(Color(*color) for color in cls._colors[2])
@classmethod
def ecotect(cls):
"""Ecotect colors."""
return tuple(Color(*color) for color in cls._colors[3])
@classmethod
def view_study(cls):
"""View analysis colors."""
return tuple(Color(*color) for color in cls._colors[4])
@classmethod
def shadow_study(cls):
"""Shadow study colors (dark to light grey)."""
return tuple(Color(*color) for color in cls._colors[5])
@classmethod
def glare_study(cls):
"""Useful for depicting spatial glare (light blue to yellow, red, black)."""
return tuple(Color(*color) for color in cls._colors[6])
@classmethod
def annual_comfort(cls):
"""Good for annual metrics like UDI and thermal comfort percent."""
return tuple(Color(*color) for color in cls._colors[7])
@classmethod
def thermal_comfort(cls):
"""Thermal comfort colors (blue to white to red)."""
return tuple(Color(*color) for color in cls._colors[8])
@classmethod
def peak_load_balance(cls):
"""Colors for the typical terms of a peak load balance."""
return tuple(Color(*color) for color in cls._colors[9])
@classmethod
def heat_sensation(cls):
"""Red colors for heat sensation."""
return tuple(Color(*color) for color in cls._colors[10])
@classmethod
def cold_sensation(cls):
"""Blue colors for cold sensation."""
return tuple(Color(*color) for color in cls._colors[11])
@classmethod
def benefit_harm(cls):
"""Benefit / harm study colors (red to light yellow to green)."""
return tuple(Color(*color) for color in cls._colors[12])
@classmethod
def harm(cls):
"""Harm colors (light yellow to red)."""
return tuple(Color(*color) for color in cls._colors[13])
@classmethod
def benefit(cls):
"""Benefit colors (light yellow to green)."""
return tuple(Color(*color) for color in cls._colors[14])
@classmethod
def shade_benefit_harm(cls):
"""Shade benefit / harm colors (dark red to white to dark blue)."""
return tuple(Color(*color) for color in cls._colors[15])
@classmethod
def shade_harm(cls):
"""Shade harm colors (white to dark red)."""
return tuple(Color(*color) for color in cls._colors[16])
@classmethod
def shade_benefit(cls):
"""Shade benefit colors (white to dark blue)."""
return tuple(Color(*color) for color in cls._colors[17])
@classmethod
def energy_balance(cls):
"""Energy Balance colors."""
return tuple(Color(*color) for color in cls._colors[18])
@classmethod
def energy_balance_storage(cls):
"""Energy Balance colors with a brown color for storage term."""
return tuple(Color(*color) for color in cls._colors[19])
@classmethod
def therm(cls):
"""THERM colors."""
return tuple(Color(*color) for color in cls._colors[20])
@classmethod
def cloud_cover(cls):
"""Cloud cover colors."""
return tuple(Color(*color) for color in cls._colors[21])
@classmethod
def black_to_white(cls):
"""Black to white colors."""
return tuple(Color(*color) for color in cls._colors[22])
@classmethod
def blue_green_red(cls):
"""Blue to Green to Red colors."""
return tuple(Color(*color) for color in cls._colors[23])
@classmethod
def multicolored_2(cls):
"""Multi-colored colors with less saturation."""
return tuple(Color(*color) for color in cls._colors[24])
@classmethod
def multicolored_3(cls):
"""Multi-colored colors with the least saturation."""
return tuple(Color(*color) for color in cls._colors[25])
@classmethod
def openstudio_palette(cls):
"""Standard color set for the OpenStudio surface types. Ordered as follows.
Exterior Wall, Interior Wall, Underground Wall,
Roof, Ceiling, Underground Roof,
Exposed Floor, Interior Floor, Ground Floor,
Window, Door, Shade, Air
"""
return tuple(Color(*color) for color in cls._colors[26])
def __len__(self):
"""Return length of currently installed color sets."""
return len(self._colors)
def __getitem__(self, key):
"""Return one of the color sets."""
return tuple(Color(*color) for color in self._colors[key])
def ToString(self):
"""Overwrite .NET ToString."""
return self.__repr__()
def __repr__(self):
"""Colorset representation."""
return "{} currently installed Colorsets".format(len(self))
class ColorRange(object):
"""Ladybug Color Range. Used to generate colors from numerical values.
Args:
colors: A list of colors. Colors should be input as objects with
R, G, B values. Default is Ladybug's original colorset.
domain: A list of at least two numbers to set the lower and upper
boundary of the color range. This can also be a list of more than
two values, which can be used to approximate logarithmic or other types
of color scales. However, the number of values in the domain must
always be less than or equal to the number of colors.
Default: [0, 1].
continuous_colors: Boolean. If True, the colors generated from the
color range will be in a continuous gradient. If False,
they will be categorized in incremental groups according to the
number_of_segments. Default: True for continuous colors.
Properties:
* colors
* continuous_colors
* domain
Usage:
.. code-block:: python
1.
color_range = ColorRange(continuous_colors=False)
color_range.domain = [100, 2000]
color_range.colors = [Color(75, 107, 169), Color(245, 239, 103),
Color(234, 38, 0)]
print(color_range.color(99))
print(color_range.color(100))
print(color_range.color(2000))
print(color_range.color(2001))
>> (R:75, G:107, B:169, A:255)
>> (R:245, G:239, B:103, A:255)
>> (R:245, G:239, B:103, A:255)
>> (R:234, G:38, B:0, A:255)
2.
color_range = ColorRange(continuous_colors=False)
color_range.domain = [100, 2000]
color_range.colors = [Color(75, 107, 169), Color(245, 239, 103),
Color(234, 38, 0)]
color_range.color(300)
>> (R:245, G:239, B:103, A:255)
"""
def __init__(self, colors=None, domain=None, continuous_colors=True):
"""Initiate Ladybug color range.
"""
self._continuous_colors = True if continuous_colors is None \
else continuous_colors
assert isinstance(self._continuous_colors, bool), \
"continuous_colors should be a Boolean.\nGot {}.".format(
type(continuous_colors))
self._is_domain_set = False
self.colors = colors
self.domain = domain
@classmethod
def from_dict(cls, data):
"""Create a color range from a dictionary.
Args:
data: A python dictionary in the following format
.. code-block:: python
{
"colors": [{'r': 0, 'g': 0, 'b': 0}, {'r': 255, 'g': 255, 'b': 255}],
"domain": [0, 100],
"continuous_colors": True
}
"""
optional_keys = ('colors', 'domain', 'continuous_colors')
for key in optional_keys:
if key not in data:
data[key] = None
colors = None
if data['colors'] is not None:
colors = [Color.from_dict(col) for col in data['colors']]
return cls(colors, data['domain'], data['continuous_colors'])
@property
def colors(self):
"""Get or set the colors defining the color range."""
return self._colors
@colors.setter
def colors(self, cols):
if not cols:
self._colors = Colorset.original()
else:
assert isinstance(cols, Iterable) \
and not isinstance(cols, (str, dict, bytes, bytearray)), \
'Colors should be a list or tuple. Got {}'.format(type(cols))
try:
cols = tuple(col if isinstance(col, Color) else Color(
col.R, col.G, col.B) for col in cols)
except AttributeError:
try:
cols = tuple(Color(col.Red, col.Green, col.Blue) for col in cols)
except AttributeError:
raise ValueError("{} is not a valid list of colors".format(cols))
if self._is_domain_set:
self.domain = self.domain # re-check the domain against new colors
self._colors = cols
@property
def domain(self):
"""Get or set the domain defining the color range."""
return self._domain
@domain.setter
def domain(self, dom):
# check and prepare domain
if not dom:
dom = (0, 1)
else:
assert isinstance(dom, Iterable) \
and not isinstance(dom, (str, dict, bytes, bytearray)), \
'Domain should be a list or tuple. Got {}'.format(type(dom))
for val in dom:
assert isinstance(val, (float, int)), 'Values of a domain must be ' \
'numbers. Got {}.'.format(type(val))
dom = sorted(map(float, dom))
if self._continuous_colors: # continuous
# if type is continuous domain can only be 2 values
# or at least 1 value less than number of colors
if len(dom) == 2:
# remap domain based on colors
_step = float(dom[1] - dom[0]) / (len(self._colors) - 1)
_n = dom[0]
dom = tuple(_n + c * _step for c in range(len(self._colors)))
else:
assert len(self._colors) >= len(dom), \
"For a continuous color range, the length of the domain should " \
"be 2 or greater than the number of colors."
else: # segmented
# Number of colors should be at least one more than number of domain values
assert len(self._colors) > len(dom), \
"For a segmented color range, the length of colors " + \
"should be more than the number of domain values ."
self._is_domain_set = True
self._domain = tuple(dom)
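# Worked example of the continuous remapping above: with the default 10-color
# set and domain=[0, 100], _step is 100 / 9, so the stored domain becomes
# (0.0, 11.11..., 22.22..., ..., 100.0) -- one value per color.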
@property
def continuous_colors(self):
"""Boolean noting whether colors generated are continuous or discrete."""
return self._continuous_colors
def color(self, value):
"""Calculate a color along the range for an input value."""
if value < self._domain[0]:
return self._colors[0]
if value > self._domain[-1]:
return self._colors[-1]
# find the index of the value in domain
for count, d in enumerate(self._domain):
if d <= value <= self._domain[count + 1]:
if self._continuous_colors:
return self._cal_color(value, count)
else:
return self._colors[count + 1]
def duplicate(self):
"""Return a copy of the current color range."""
return self.__copy__()
def to_dict(self):
"""Get color range as a dictionary."""
return {
'colors': [col.to_dict() for col in self.colors],
'domain': self.domain,
'continuous_colors': self.continuous_colors,
'type': 'ColorRange'
}
def _cal_color(self, value, color_index):
"""Blend between two colors based on input value."""
range_min_p = self._domain[color_index]
range_p = self._domain[color_index + 1] - range_min_p
try:
factor = (value - range_min_p) / range_p
except ZeroDivisionError:
factor = 0
min_color = self.colors[color_index]
max_color = self.colors[color_index + 1]
red = round(factor * (max_color.r - min_color.r) + min_color.r)
green = round(factor * (max_color.g - min_color.g) + min_color.g)
blue = round(factor * (max_color.b - min_color.b) + min_color.b)
return Color(red, green, blue)
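# Blending example for _cal_color (illustrative): if the two neighbouring
# colors are Color(0, 0, 0) and Color(255, 255, 255) and the value falls
# halfway through that segment of the domain (factor = 0.5), the result is
# approximately Color(128, 128, 128).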
def __copy__(self):
return self.__class__(self.colors, self.domain, self.continuous_colors)
def __len__(self):
"""Return length of colors."""
return len(self._colors)
def __getitem__(self, key):
"""Return key item from the color list."""
return self._colors[key]
def __iter__(self):
"""Use colors to iterate."""
return iter(self._colors)
def ToString(self):
"""Overwrite .NET ToString."""
return self.__repr__()
def __repr__(self):
"""Color Range representation."""
return "Color Range ({} colors) (domain {})".format(len(self), self.domain)
| gpl-3.0 |
bhdouglass/clickable | tests/mocks/config.py | 1 | 1911 | from clickable.config.project import ProjectConfig
from clickable.config.constants import Constants
from clickable.config.file_helpers import InstallFiles
from clickable import __version__
from unittest.mock import Mock
class InstallFilesMock(InstallFiles):
def write_manifest(self, *args):
pass
def get_manifest(self):
return {
'version': '1.2.3',
'name': 'foo.bar',
'architecture': '@CLICK_ARCH@',
'hooks': {
'foo': {
'desktop': '/fake/foo.desktop',
},
},
}
class ConfigMock(ProjectConfig):
def __init__(self,
mock_config_json=None,
mock_config_env=None,
mock_install_files=False,
*args, **kwargs):
self.mock_config_json = mock_config_json
self.mock_config_env = mock_config_env
self.mock_install_files = mock_install_files
super().__init__(clickable_version=__version__, *args, **kwargs)
def load_json_config(self, config_path):
if self.mock_config_json is None:
return super().load_json_config(config_path)
else:
config_json = self.mock_config_json
return config_json
def get_env_var(self, key):
if self.mock_config_env is None:
return super().get_env_var(key)
else:
return self.mock_config_env.get(key, None)
def set_builder_interactive(self):
if not self.config['builder'] and not self.needs_builder():
self.config["builder"] = Constants.PURE
def setup_helpers(self):
super().setup_helpers()
if self.mock_install_files:
self.install_files = InstallFilesMock(
self.config['install_dir'],
self.config['builder'],
self.config['arch'])
| gpl-3.0 |
cxxgtxy/tensorflow | tensorflow/python/estimator/canned/dnn.py | 3 | 15021 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.05
def _add_hidden_layer_summary(value, tag):
summary.scalar('%s_fraction_of_zero_values' % tag, nn.zero_fraction(value))
summary.histogram('%s_activation' % tag, value)
def _dnn_model_fn(
features, labels, mode, head, hidden_units, feature_columns,
optimizer='Adagrad', activation_fn=nn.relu, dropout=None,
input_layer_partitioner=None, config=None):
"""Deep Neural Net model_fn.
Args:
features: Dict of `Tensor` (depends on data passed to `train`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
head: A `head_lib._Head` instance.
hidden_units: Iterable of integer number of hidden units per layer.
feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.
optimizer: String, `tf.Optimizer` object, or callable that creates the
optimizer to use for training. If not specified, will use the Adagrad
optimizer with a default learning rate of 0.05.
activation_fn: Activation function applied to each layer.
dropout: When not `None`, the probability we will drop out a given
coordinate.
input_layer_partitioner: Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
Returns:
predictions: A dict of `Tensor` objects.
loss: A scalar containing the loss of the step.
train_op: The op for training.
"""
optimizer = optimizers.get_optimizer_instance(
optimizer, learning_rate=_LEARNING_RATE)
num_ps_replicas = config.num_ps_replicas if config else 0
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas)
with variable_scope.variable_scope(
'dnn',
values=tuple(six.itervalues(features)),
partitioner=partitioner):
input_layer_partitioner = input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
with variable_scope.variable_scope(
'input_from_feature_columns',
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner):
net = feature_column_lib.input_layer(
features=features,
feature_columns=feature_columns)
for layer_id, num_hidden_units in enumerate(hidden_units):
with variable_scope.variable_scope(
'hiddenlayer_%d' % layer_id,
values=(net,)) as hidden_layer_scope:
net = core_layers.dense(
net,
units=num_hidden_units,
activation=activation_fn,
kernel_initializer=init_ops.glorot_uniform_initializer(),
name=hidden_layer_scope)
if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
net = core_layers.dropout(net, rate=dropout, training=True)
_add_hidden_layer_summary(net, hidden_layer_scope.name)
with variable_scope.variable_scope(
'logits',
values=(net,)) as logits_scope:
logits = core_layers.dense(
net,
units=head.logits_dimension,
activation=None,
kernel_initializer=init_ops.glorot_uniform_initializer(),
name=logits_scope)
_add_hidden_layer_summary(logits, logits_scope.name)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizer.minimize(
loss,
global_step=training_util.get_global_step())
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
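# Shape sketch for the model function above (the sizes are illustrative):
# with hidden_units=[64, 32] and a head whose logits_dimension is 3, a batch
# of features flows through input_layer -> dense(64, relu) -> dense(32, relu)
# -> dense(3, linear), and the head converts those logits into predictions,
# the loss and the training op.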
class DNNClassifier(estimator.Estimator):
"""A classifier for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
def input_fn_train(): # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
def input_fn_eval(): # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
def input_fn_predict(): # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_feature_key` is not `None`, a feature with
`key=weight_feature_key` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_feature_key=None,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
"""Initializes a `DNNClassifier` instance.
Args:
hidden_units: Iterable of number hidden units per layer. All layers are
fully connected. Ex. `[64, 32]` means first layer has 64 nodes and
second one has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `_FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
n_classes: Number of label classes. Defaults to 2, namely binary
classification. Must be > 1.
weight_feature_key: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
input_layer_partitioner: Optional. Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
"""
if n_classes == 2:
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access
weight_feature_key=weight_feature_key)
else:
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access
n_classes, weight_feature_key=weight_feature_key)
def _model_fn(features, labels, mode, config):
return _dnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
hidden_units=hidden_units,
feature_columns=tuple(feature_columns or []),
optimizer=optimizer,
activation_fn=activation_fn,
dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(DNNClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
class DNNRegressor(estimator.Estimator):
"""A regressor for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNRegressor(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
def input_fn_train(): # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
def input_fn_eval(): # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
def input_fn_predict(): # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_feature_key` is not `None`, a feature with
`key=weight_feature_key` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
label_dimension=1,
weight_feature_key=None,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
"""Initializes a `DNNRegressor` instance.
Args:
hidden_units: Iterable of number hidden units per layer. All layers are
fully connected. Ex. `[64, 32]` means first layer has 64 nodes and
second one has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `_FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
weight_feature_key: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
input_layer_partitioner: Optional. Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
"""
def _model_fn(features, labels, mode, config):
return _dnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head_lib._regression_head_with_mean_squared_error_loss( # pylint: disable=protected-access
label_dimension=label_dimension,
weight_feature_key=weight_feature_key),
hidden_units=hidden_units,
feature_columns=tuple(feature_columns or []),
optimizer=optimizer,
activation_fn=activation_fn,
dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(DNNRegressor, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
| apache-2.0 |
ychen820/microblog | src/lib/flask_debugtoolbar/__init__.py | 14 | 6925 | import os
from flask import current_app, request, g
from flask.globals import _request_ctx_stack
from flask import send_from_directory
from jinja2 import Environment, PackageLoader
from werkzeug.exceptions import HTTPException
from werkzeug.urls import url_quote_plus
from flask_debugtoolbar.toolbar import DebugToolbar
from flask import Blueprint
module = Blueprint('debugtoolbar', __name__)
def replace_insensitive(string, target, replacement):
"""Similar to string.replace() but is case insensitive
Code borrowed from: http://forums.devshed.com/python-programming-11/case-insensitive-string-replace-490921.html
"""
no_case = string.lower()
index = no_case.rfind(target.lower())
if index >= 0:
return string[:index] + replacement + string[index + len(target):]
else: # no results so return the original string
return string
def _printable(value):
if isinstance(value, unicode):
return value.encode('unicode_escape')
elif isinstance(value, str):
return value.encode('string_escape')
else:
return repr(value)
class DebugToolbarExtension(object):
_static_dir = os.path.realpath(
os.path.join(os.path.dirname(__file__), 'static'))
_redirect_codes = [301, 302, 303, 304]
def __init__(self, app):
self.app = app
self.debug_toolbars = {}
self.hosts = ()
if not app.config.get('DEBUG_TB_ENABLED', app.debug):
return
if not app.config.get('SECRET_KEY'):
raise RuntimeError(
"The Flask-DebugToolbar requires the 'SECRET_KEY' config "
"var to be set")
DebugToolbar.load_panels(app)
self.hosts = app.config.get('DEBUG_TB_HOSTS', ())
self.app.before_request(self.process_request)
self.app.after_request(self.process_response)
self.app.teardown_request(self.teardown_request)
# Monkey-patch the Flask.dispatch_request method
app.dispatch_request = self.dispatch_request
# Configure jinja for the internal templates and add url rules
# for static data
self.jinja_env = Environment(
autoescape=True,
extensions=['jinja2.ext.i18n', 'jinja2.ext.with_'],
loader=PackageLoader(__name__, 'templates'))
self.jinja_env.filters['urlencode'] = url_quote_plus
self.jinja_env.filters['printable'] = _printable
app.add_url_rule('/_debug_toolbar/static/<path:filename>',
'_debug_toolbar.static', self.send_static_file)
app.register_blueprint(module, url_prefix='/_debug_toolbar/views')
def dispatch_request(self):
"""Modified version of Flask.dispatch_request to call process_view."""
req = _request_ctx_stack.top.request
app = current_app
if req.routing_exception is not None:
app.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return app.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
view_func = app.view_functions[rule.endpoint]
view_func = self.process_view(app, view_func, req.view_args)
return view_func(**req.view_args)
def _show_toolbar(self):
"""Return a boolean to indicate if we need to show the toolbar."""
if request.path.startswith('/_debug_toolbar/'):
return False
if self.hosts and request.remote_addr not in self.hosts:
return False
return True
def send_static_file(self, filename):
"""Send a static file from the flask-debugtoolbar static directory."""
return send_from_directory(self._static_dir, filename)
def process_request(self):
g.debug_toolbar = self
if not self._show_toolbar():
return
real_request = request._get_current_object()
self.debug_toolbars[real_request] = DebugToolbar(real_request, self.jinja_env)
for panel in self.debug_toolbars[real_request].panels:
panel.process_request(real_request)
def process_view(self, app, view_func, view_kwargs):
""" This method is called just before the flask view is called.
This is done by the dispatch_request method.
"""
real_request = request._get_current_object()
if real_request in self.debug_toolbars:
for panel in self.debug_toolbars[real_request].panels:
new_view = panel.process_view(real_request, view_func, view_kwargs)
if new_view:
view_func = new_view
return view_func
def process_response(self, response):
real_request = request._get_current_object()
if real_request not in self.debug_toolbars:
return response
# Intercept http redirect codes and display an html page with a
# link to the target.
if self.debug_toolbars[real_request].config['DEBUG_TB_INTERCEPT_REDIRECTS']:
if response.status_code in self._redirect_codes:
redirect_to = response.location
redirect_code = response.status_code
if redirect_to:
content = self.render('redirect.html', {
'redirect_to': redirect_to,
'redirect_code': redirect_code
})
response.content_length = len(content)
response.location = None
response.response = [content]
response.status_code = 200
# If the http response code is 200 then we process to add the
# toolbar to the returned html response.
if (response.status_code == 200
and response.headers['content-type'].startswith('text/html')):
for panel in self.debug_toolbars[real_request].panels:
panel.process_response(real_request, response)
if response.is_sequence:
response_html = response.data.decode(response.charset)
toolbar_html = self.debug_toolbars[real_request].render_toolbar()
content = replace_insensitive(
response_html, '</body>', toolbar_html + '</body>')
content = content.encode(response.charset)
response.response = [content]
response.content_length = len(content)
return response
def teardown_request(self, exc):
self.debug_toolbars.pop(request._get_current_object(), None)
def render(self, template_name, context):
template = self.jinja_env.get_template(template_name)
return template.render(**context)
| bsd-3-clause |
LogicalKnight/pywinauto | pywinauto/tests/comboboxdroppedheight.py | 17 | 2469 | # GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"""ComboBox dropped height Test
**What is checked**
It is ensured that the height of the list displayed when the combobox is
dropped down is not less than the height of the reference.
**How is it checked**
The value for the dropped rectangle can be retrieved from windows. The height
of this rectangle is calculated and compared against the reference height.
**When is a bug reported**
If the height of the dropped rectangle for the combobox being checked is less
than the height of the reference one then a bug is reported.
**Bug Extra Information**
There is no extra information associated with this bug type
**Is Reference dialog needed**
The reference dialog is necessary for this test.
**False positive bug reports**
No false bugs should be reported. If the font of the localised control has a
smaller height than the reference then it is possible that the dropped
rectangle could be of a different size.
**Test Identifier**
The identifier for this test/bug is "ComboBoxDroppedHeight"
"""
__revision__ = "$Revision$"
testname = "ComboBoxDroppedHeight"
def ComboBoxDroppedHeightTest(windows):
"Check if each combobox height is the same as the reference"
bugs = []
for win in windows:
if not win.ref:
continue
if win.Class() != "ComboBox" or win.ref.Class() != "ComboBox":
continue
if win.DroppedRect().height() != win.ref.DroppedRect().height():
bugs.append((
[win, ],
{},
testname,
0,)
)
return bugs
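# Usage sketch (hypothetical list of controls, outside the normal test
# harness): bugs = ComboBoxDroppedHeightTest(loc_controls). Each reported bug
# is a tuple ([win], {}, "ComboBoxDroppedHeight", 0) for a localised combobox
# whose dropped rectangle height differs from that of its reference control.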
| lgpl-2.1 |