the-stack_106_16454
|
############################################################
# #
# hprose #
# #
# Official WebSite: http://www.hprose.com/ #
# http://www.hprose.org/ #
# #
############################################################
############################################################
# #
# hprose/server.py #
# #
# hprose server for python 3.0+ #
# #
# LastModified: Mar 8, 2015 #
# Author: Ma Bingyao <[email protected]> #
# #
############################################################
import types, traceback
from io import BytesIO
from sys import modules, exc_info
from hprose.io import HproseTags, HproseWriter, HproseReader
from hprose.common import HproseResultMode, HproseException
def _getInstanceMethods(cls):
v = vars(cls)
return [name for name in v if isinstance(v[name], types.FunctionType)]
def _getClassMethods(cls):
v = vars(cls)
return [name for name in v if isinstance(v[name], classmethod)]
def _getStaticMethods(cls):
v = vars(cls)
return [name for name in v if isinstance(v[name], staticmethod)]
class HproseService(object):
def __init__(self):
self.__functions = {}
self.__funcNames = {}
self.__resultMode = {}
self.__simpleMode = {}
self.__filters = []
self.debug = False
self.simple = False
self.onBeforeInvoke = None
self.onAfterInvoke = None
self.onSendHeader = None
self.onSendError = None
def getFilter(self):
if (len(self.__filters) == 0):
return None
return self.__filters[0]
def setFilter(self, _filter):
self.__filters = []
if _filter != None:
self.__filters.append(_filter)
filter = property(fget = getFilter, fset = setFilter)
def addFilter(self, _filter):
self.__filters.append(_filter)
def removeFilter(self, _filter):
self.__filters.remove(_filter)
def __inputFilter(self, data, context):
for _filter in reversed(self.__filters):
data = _filter.inputFilter(data, context)
return data
def __outputFilter(self, data, context):
for _filter in self.__filters:
data = _filter.outputFilter(data, context)
return data
def _responseEnd(self, ostream, context):
data = self.__outputFilter(ostream.getvalue(), context)
ostream.close()
return data
def _fixArgs(self, args, function, context):
if hasattr(function, '__code__'):
fc = function.__code__
c = fc.co_argcount
if (len(args) + 1 == c) and (c > 0) and (fc.co_varnames[c - 1] == 'context'):
args.append(context)
return args
def _fireBeforeInvokeEvent(self, name, args, byref, context):
if self.onBeforeInvoke != None:
if hasattr(self.onBeforeInvoke, '__code__'):
argcount = self.onBeforeInvoke.__code__.co_argcount
if argcount == 4:
self.onBeforeInvoke(name, args, byref, context)
elif argcount == 3:
self.onBeforeInvoke(name, args, byref)
elif argcount == 2:
self.onBeforeInvoke(name, args)
elif argcount == 1:
self.onBeforeInvoke(name)
elif argcount == 0:
self.onBeforeInvoke()
else:
self.onBeforeInvoke(name, args, byref, context)
def _fireAfterInvokeEvent(self, name, args, byref, result, context):
if self.onAfterInvoke != None:
if hasattr(self.onAfterInvoke, '__code__'):
argcount = self.onAfterInvoke.__code__.co_argcount
if argcount == 5:
self.onAfterInvoke(name, args, byref, result, context)
elif argcount == 4:
self.onAfterInvoke(name, args, byref, result)
elif argcount == 3:
self.onAfterInvoke(name, args, byref)
elif argcount == 2:
self.onAfterInvoke(name, args)
elif argcount == 1:
self.onAfterInvoke(name)
elif argcount == 0:
self.onAfterInvoke()
else:
self.onAfterInvoke(name, args, byref, result, context)
def _fireErrorEvent(self, e, context):
if self.onSendError != None:
if hasattr(self.onSendError, '__code__'):
argcount = self.onSendError.__code__.co_argcount
if argcount == 2:
self.onSendError(e, context)
elif argcount == 1:
self.onSendError(e)
elif argcount == 0:
self.onSendError()
else:
self.onSendError(e, context)
def _doError(self, e, context):
self._fireErrorEvent(e, context)
if self.debug:
e = ''.join(traceback.format_exception(*exc_info()))
ostream = BytesIO()
writer = HproseWriter(ostream, True)
ostream.write(HproseTags.TagError)
writer.writeString(str(e).encode('utf-8'))
ostream.write(HproseTags.TagEnd)
return self._responseEnd(ostream, context)
def _doInvoke(self, istream, context):
simpleReader = HproseReader(istream, True)
tag = HproseTags.TagCall
while tag == HproseTags.TagCall:
name = simpleReader.readString()
aliasname = name.lower()
args = []
byref = False
tag = simpleReader.checkTags((HproseTags.TagList,
HproseTags.TagEnd,
HproseTags.TagCall))
if tag == HproseTags.TagList:
reader = HproseReader(istream)
args = reader.readListWithoutTag()
tag = reader.checkTags((HproseTags.TagTrue,
HproseTags.TagEnd,
HproseTags.TagCall))
if (tag == HproseTags.TagTrue):
byref = True
tag = reader.checkTags((HproseTags.TagEnd,
HproseTags.TagCall))
self._fireBeforeInvokeEvent(name, args, byref, context)
if aliasname in self.__functions:
function = self.__functions[aliasname]
resultMode = self.__resultMode[aliasname]
simple = self.__simpleMode[aliasname]
result = function(*self._fixArgs(args, function, context))
elif '*' in self.__functions:
function = self.__functions['*']
resultMode = self.__resultMode['*']
simple = self.__simpleMode['*']
result = function(name, args)
else:
raise HproseException("Can't find this function %s()." % name)
self._fireAfterInvokeEvent(name, args, byref, result, context)
ostream = BytesIO()
if resultMode == HproseResultMode.RawWithEndTag:
return self.__outputFilter(result, context)
if resultMode == HproseResultMode.Raw:
ostream.write(result)
else:
ostream.write(HproseTags.TagResult)
if resultMode == HproseResultMode.Serialized:
ostream.write(result)
else:
if simple == None: simple = self.simple
writer = HproseWriter(ostream, simple)
writer.serialize(result)
if byref:
ostream.write(HproseTags.TagArgument)
writer.reset()
writer.writeList(args)
ostream.write(HproseTags.TagEnd)
return self._responseEnd(ostream, context)
def _doFunctionList(self, context):
ostream = BytesIO()
writer = HproseWriter(ostream, True)
ostream.write(HproseTags.TagFunctions)
writer.writeView(self.__funcNames.values())
ostream.write(HproseTags.TagEnd)
return self._responseEnd(ostream, context)
def _handle(self, data, context):
istream = None
try:
data = self.__inputFilter(data, context)
if data == None or data == b'' or data[len(data) - 1:] != HproseTags.TagEnd:
raise HproseException("Wrong Request: \r\n%s" % str(data, 'utf-8'))
istream = BytesIO(data)
tag = istream.read(1)
if tag == HproseTags.TagCall:
return self._doInvoke(istream, context)
elif tag == HproseTags.TagEnd:
return self._doFunctionList(context)
else:
raise HproseException("Wrong Request: \r\n%s" % str(data, 'utf-8'))
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
return self._doError(e, context)
finally:
if istream != None: istream.close()
def addMissingFunction(self, function, resultMode = HproseResultMode.Normal, simple = None):
self.addFunction(function, '*', resultMode, simple)
def addFunction(self, function, alias = None, resultMode = HproseResultMode.Normal, simple = None):
if isinstance(function, str):
function = getattr(modules['__main__'], function, None)
if not hasattr(function, '__call__'):
raise HproseException('Argument function is not callable')
if alias == None:
alias = function.__name__
if isinstance(alias, str):
aliasname = alias.lower()
self.__functions[aliasname] = function
self.__funcNames[aliasname] = alias
self.__resultMode[aliasname] = resultMode
self.__simpleMode[aliasname] = simple
else:
raise HproseException('Argument alias is not a string')
def addFunctions(self, functions, aliases = None, resultMode = HproseResultMode.Normal, simple = None):
aliases_is_null = (aliases == None)
if not isinstance(functions, (list, tuple)):
raise HproseException('Argument functions is not a list or tuple')
count = len(functions)
if not aliases_is_null and count != len(aliases):
raise HproseException('The count of functions is not matched with aliases')
for i in range(count):
function = functions[i]
if aliases_is_null:
self.addFunction(function, None, resultMode, simple)
else:
self.addFunction(function, aliases[i], resultMode, simple)
def addMethod(self, methodname, belongto, alias = None, resultMode = HproseResultMode.Normal, simple = None):
function = getattr(belongto, methodname, None)
if alias == None:
self.addFunction(function, methodname, resultMode, simple)
else:
self.addFunction(function, alias, resultMode, simple)
def addMethods(self, methods, belongto, aliases = None, resultMode = HproseResultMode.Normal, simple = None):
aliases_is_null = (aliases == None)
if not isinstance(methods, (list, tuple)):
raise HproseException('Argument methods is not a list or tuple')
if isinstance(aliases, str):
aliasPrefix = aliases
aliases = [aliasPrefix + '_' + name for name in methods]
count = len(methods)
if not aliases_is_null and count != len(aliases):
raise HproseException('The count of methods is not matched with aliases')
for i in range(count):
method = methods[i]
function = getattr(belongto, method, None)
if aliases_is_null:
self.addFunction(function, method, resultMode, simple)
else:
self.addFunction(function, aliases[i], resultMode, simple)
def addInstanceMethods(self, obj, cls = None, aliasPrefix = None, resultMode = HproseResultMode.Normal, simple = None):
if cls == None: cls = obj.__class__
self.addMethods(_getInstanceMethods(cls), obj, aliasPrefix, resultMode, simple)
def addClassMethods(self, cls, execcls = None, aliasPrefix = None, resultMode = HproseResultMode.Normal, simple = None):
if execcls == None: execcls = cls
self.addMethods(_getClassMethods(cls), execcls, aliasPrefix, resultMode, simple)
def addStaticMethods(self, cls, aliasPrefix = None, resultMode = HproseResultMode.Normal, simple = None):
self.addMethods(_getStaticMethods(cls), cls, aliasPrefix, resultMode, simple)
def add(self, *args):
args_num = len(args)
if args_num == 1:
if isinstance(args[0], (tuple, list)):
self.addFunctions(args[0])
elif isinstance(args[0], type):
self.addClassMethods(args[0])
self.addStaticMethods(args[0])
elif hasattr(args[0], '__call__'):
self.addFunction(args[0])
else:
self.addInstanceMethods(args[0])
elif args_num == 2:
if isinstance(args[0], type):
if isinstance(args[1], type):
self.addClassMethods(args[0], args[1])
else:
self.addClassMethods(args[0], args[0], args[1])
self.addStaticMethods(args[0], args[1])
elif isinstance(args[0], str):
if isinstance(args[1], str):
self.addFunction(args[0], args[1])
else:
self.addMethod(args[0], args[1])
elif isinstance(args[0], (tuple, list)):
if isinstance(args[1], (tuple, list)):
self.addFunctions(args[0], args[1])
else:
self.addMethods(args[0], args[1])
elif hasattr(args[0], '__call__') and isinstance(args[1], str):
self.addFunction(args[0], args[1])
elif isinstance(args[1], str):
self.addInstanceMethods(args[0], None, args[1])
else:
self.addInstanceMethods(args[0], args[1])
elif args_num == 3:
if isinstance(args[0], str) and isinstance(args[2], str):
if args[1] == None:
self.addFunction(args[0], args[2])
else:
self.addMethod(args[0], args[1], args[2])
elif isinstance(args[0], (tuple, list)):
if isinstance(args[2], (tuple, list)) and args[1] == None:
self.addFunctions(args[0], args[2])
else:
self.addMethods(args[0], args[1], args[2])
elif isinstance(args[1], type) and isinstance(args[2], str):
if isinstance(args[0], type):
self.addClassMethods(args[0], args[1], args[2])
else:
self.addInstanceMethods(args[0], args[1], args[2])
elif hasattr(args[0], '__call__') and args[1] == None and isinstance(args[2], str):
self.addFunction(args[0], args[2])
else:
raise HproseException('Wrong arguments')
else:
raise HproseException('Wrong arguments')
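
# --- Hedged usage sketch (added for illustration; not part of the original file). ---
# The `hello` function, the 'greet' alias and the __main__ guard are assumptions; a
# real deployment would wrap HproseService in one of the hprose transport servers.
if __name__ == '__main__':
    def hello(name):
        # Plain callable published over hprose.
        return 'Hello %s!' % name

    service = HproseService()
    service.addFunction(hello)            # exposed under its own name, "hello"
    service.addFunction(hello, 'greet')   # the same callable under an alias
    service.add([hello])                  # add() dispatches on the argument types
    # A transport layer would pass hprose-encoded request bytes (ending in TagEnd)
    # to service._handle(request_bytes, context) and send back the returned bytes.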
|
the-stack_106_16455
|
import numpy as np
from scipy.signal import medfilt
import cv2
import os.path as op
from skimage import transform
"""
The image had better be squared... And width and height can both be divided by 4
"""
import gist
from IPython import embed
def get_gist_C_implementation(img, mask=None):
"""
Extract GIST descriptor of an image. Implemented by C.
This implementation cannot change orientation and filter num, but it's really faster than MATLAB.
"""
    _img = transform.resize(img, (224, 224), preserve_range=True).astype('uint8')
    if mask is None:
        return gist.extract(_img)
    # Resize the mask only when one is provided; resizing None would raise an error.
    _mask = transform.resize(mask, (224, 224), preserve_range=True).astype('uint8')
    _img[_mask > 0] = 0
# 1440 = 3 * (6 * 5) * 4 * 4 for colored imgs
descriptor = gist.extract(_img).reshape((3, 30, 4, 4))
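    # Weight each 4x4 spatial block by the fraction of its pixels outside the masked
    # region (mask == 0), so heavily masked blocks contribute less to the descriptor.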
weight = np.zeros((4, 4)).astype('float32')
unity, unitx = mask.shape[0] // 4, mask.shape[1] // 4
for _y in range(4):
for _x in range(4):
weight[_y, _x] = np.sum(mask[_y * unity: (_y + 1) * unity, _x * unitx: (_x + 1) * unitx] == 0) / (unity * unitx)
for c in range(3):
for i in range(30):
descriptor[c, i] *= weight
return descriptor.reshape((-1, ))
import matlab.engine
def get_gist_MATLAB_implementation(img, mask=None):
"""
Antonio's original code. To switch different orientation and filter num.
GIST feature order: (c, w, h)
Attention: This mask is the original mask, not the mask of the border area
"""
_img = img.copy()
if mask is not None:
_mask = np.expand_dims(mask, axis=2)
_mask = np.concatenate((_mask, _mask, _mask), axis=2)
_img[_mask > 0] = 0
cv2.imwrite("./gist_Antonio/input.png", _img)
eng = matlab.engine.start_matlab()
eng.cd("{}/gist_Antonio".format(op.abspath('.')))
eng.run("getGIST", nargout=0)
if mask is None:
return np.array(eng.workspace['gist'])
# 480 = (6 * 5) * 4 * 4
descriptor = np.array(eng.workspace['gist']).reshape((30, 4, 4))
weight = np.zeros((4, 4)).astype('float32')
unity, unitx = mask.shape[0] // 4, mask.shape[1] // 4
for _y in range(4):
for _x in range(4):
weight[_y, _x] = np.sum(mask[_y * unity: (_y + 1) * unity, _x * unitx: (_x + 1) * unitx] > 0) / (unity * unitx)
for i in range(30):
descriptor[i] *= weight
return descriptor.reshape((-1, ))
def get_texture(img):
"""
Median filter
"""
return medfilt(img, np.array([5, 5, 1]))
def get_lab(img):
"""
BGR2L*a*b*
"""
return cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
if __name__ == "__main__":
img = cv2.imread("./dataset/raw_image/test.bmp")
mask = cv2.imread("./dataset/raw_image/testmask.bmp")[:, :, 0]
mask[mask > 0] = 1
print(get_gist_MATLAB_implementation(img, mask).shape)
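    # The C implementation can be exercised the same way (hedged: the `gist` package
    # must be importable):
    # print(get_gist_C_implementation(img, mask).shape)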
|
the-stack_106_16456
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The Truth Table-based Quantum Oracle.
"""
from typing import Union, List
import logging
import operator
import math
from functools import reduce
import numpy as np
from dlx import DLX
from sympy import symbols
from sympy.logic.boolalg import Xor, And
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.aqua import AquaError
from qiskit.aqua.circuits import ESOP
from qiskit.aqua.components.oracles import Oracle
from qiskit.aqua.utils.arithmetic import is_power_of_2
from qiskit.aqua.utils.validation import validate_in_set
from .ast_utils import get_ast
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class TruthTableOracle(Oracle):
"""
The Truth Table-based Quantum Oracle.
Besides logical expressions, (see :class:`LogicalExpressionOracle`) another common way of
specifying boolean functions is using truth tables, which is basically an exhaustive mapping
from input binary bit-strings of length :math:`n` to corresponding output bit-strings of
length :math:`m`. For example, the following is a simple truth table that corresponds to
the `XOR` of two variables:
===== ===== =============
Inputs Output
------------ -------------
A B A xor B
===== ===== =============
0 0 0
0 1 1
1 0 1
1 1 0
===== ===== =============
In this case :math:`n=2`, and :math:`m=1`. Often, for brevity, the input bit-strings are
omitted because they can be easily derived for any given :math:`n`. So to completely specify a
truth table, we only need a Length-2 :sup:`n` bit-string for each of the :math:`m` outputs.
In the above example, a single bit-string `'0110'` would suffice. Besides `'0'` and `'1'`,
one can also use `'x'` in the output string to indicate `'do-not-care'` entries.
For example, `'101x'` specifies a truth table (again :math:`n=2` and :math:`m=1`)
for which the output upon input `'11'` doesn't matter. The truth table oracle takes either a
single string or a list of equal-length strings for truth table specifications.
Regarding circuit optimization and mct usages, the truth table oracle is similar to the
:class:`LogicalExpressionOracle`. One difference is that, unlike the logical expression oracle
which builds circuits out of CNF or DNF, the truth table oracle uses Exclusive Sum of Products
(ESOP), which is similar to DNF, with the only difference being the outermost operation
being `XOR` as opposed to a disjunction. Because of this difference, an implicant-based method
is used here for circuit optimization: First, the `Quine-McCluskey algorithm
<https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm>`__ is used to find all prime
implicants of the input truth table; then an `Exact Cover
<https://en.wikipedia.org/wiki/Knuth%27s_Algorithm_X>`__ is found among all prime implicants
and truth table onset row entries. The exact cover is then used to build the
corresponding oracle circuit.
"""
def __init__(self,
bitmaps: Union[str, List[str]],
optimization: bool = False,
mct_mode: str = 'basic'):
"""
Args:
bitmaps: A single binary string or a list of binary strings
representing the desired single- and multi-value truth table.
optimization: Boolean flag for attempting circuit optimization.
When set, the Quine-McCluskey algorithm is used to compute the prime
implicants of the truth table,
and then its exact cover is computed to try to reduce the circuit.
mct_mode: The mode to use when constructing multiple-control Toffoli.
Raises:
AquaError: Invalid input
"""
if isinstance(bitmaps, str):
bitmaps = [bitmaps]
validate_in_set('mct_mode', mct_mode,
{'basic', 'basic-dirty-ancilla',
'advanced', 'noancilla'})
super().__init__()
self._mct_mode = mct_mode.strip().lower()
self._optimization = optimization
self._bitmaps = bitmaps
# check that the input bitmaps length is a power of 2
if not is_power_of_2(len(bitmaps[0])):
raise AquaError('Length of any bitmap must be a power of 2.')
for bitmap in bitmaps[1:]:
if not len(bitmap) == len(bitmaps[0]):
raise AquaError('Length of all bitmaps must be the same.')
self._nbits = int(math.log(len(bitmaps[0]), 2))
self._num_outputs = len(bitmaps)
self._lit_to_var = None
self._var_to_lit = None
esop_exprs = []
for bitmap in bitmaps:
esop_expr = self._get_esop_ast(bitmap)
esop_exprs.append(esop_expr)
self._esops = [
ESOP(esop_expr, num_vars=self._nbits) for esop_expr in esop_exprs
] if esop_exprs else None
self.construct_circuit()
def _get_esop_ast(self, bitmap):
v = symbols('v:{}'.format(self._nbits))
if self._lit_to_var is None:
self._lit_to_var = [None] + sorted(v, key=str)
if self._var_to_lit is None:
self._var_to_lit = dict(zip(self._lit_to_var[1:], range(1, self._nbits + 1)))
def binstr_to_vars(binstr):
return [(~v[x[1] - 1] if x[0] == '0' else v[x[1] - 1])
for x in zip(binstr, reversed(range(1, self._nbits + 1)))][::-1]
if not self._optimization:
expression = Xor(*[
And(*binstr_to_vars(term)) for term in
[np.binary_repr(idx, self._nbits) for idx, v in enumerate(bitmap) if v == '1']
])
else:
ones = [i for i, v in enumerate(bitmap) if v == '1']
if not ones:
return 'const', 0
dcs = [i for i, v in enumerate(bitmap) if v == '*' or v == '-' or v.lower() == 'x']
pis = get_prime_implicants(ones=ones, dcs=dcs)
cover = get_exact_covers(ones, pis)[-1]
clauses = []
for c in cover:
if len(c) == 1:
term = np.binary_repr(c[0], self._nbits)
clause = And(*[
v for i, v in enumerate(binstr_to_vars(term))
])
elif len(c) > 1:
c_or = reduce(operator.or_, c)
c_and = reduce(operator.and_, c)
_ = np.binary_repr(c_and ^ c_or, self._nbits)[::-1]
clause = And(*[
v for i, v in enumerate(binstr_to_vars(np.binary_repr(c_and, self._nbits)))
if _[i] == '0'])
else:
raise AquaError('Unexpected cover term size {}.'.format(len(c)))
if clause:
clauses.append(clause)
expression = Xor(*clauses)
ast = get_ast(self._var_to_lit, expression)
if ast is not None:
return ast
else:
return 'const', 0
@property
def variable_register(self):
""" returns variable register """
return self._variable_register
@property
def ancillary_register(self):
""" returns ancillary register """
return self._ancillary_register
@property
def output_register(self):
""" returns output register """
return self._output_register
def construct_circuit(self):
""" construct circuit """
if self._circuit is not None:
return self._circuit
self._circuit = QuantumCircuit()
self._output_register = QuantumRegister(self._num_outputs, name='o')
if self._esops:
for i, e in enumerate(self._esops):
if e is not None:
ci = e.construct_circuit(
output_register=self._output_register,
output_idx=i,
mct_mode=self._mct_mode
)
self._circuit += ci
self._variable_register = self._ancillary_register = None
for qreg in self._circuit.qregs:
if qreg.name == 'v':
self._variable_register = qreg
elif qreg.name == 'a':
self._ancillary_register = qreg
else:
self._variable_register = QuantumRegister(self._nbits, name='v')
self._ancillary_register = None
self._circuit.add_register(self._variable_register, self._output_register)
return self._circuit
def evaluate_classically(self, measurement):
""" evaluate classical """
assignment = [(var + 1) * (int(tf) * 2 - 1) for tf, var in zip(measurement[::-1],
range(len(measurement)))]
ret = [bitmap[int(measurement, 2)] == '1' for bitmap in self._bitmaps]
if self._num_outputs == 1:
return ret[0], assignment
else:
return ret, assignment
def get_prime_implicants(ones=None, dcs=None):
"""
Compute all prime implicants for a truth table using the Quine-McCluskey Algorithm
Args:
ones (list of int): The list of integers corresponding to '1' outputs
dcs (list of int): The list of integers corresponding to don't-cares
Return:
list: list of lists of int, representing all prime implicants
"""
def combine_terms(terms, num1s_dict=None):
if num1s_dict is None:
num1s_dict = {}
for num in terms:
num1s = bin(num).count('1')
if num1s not in num1s_dict:
num1s_dict[num1s] = [num]
else:
num1s_dict[num1s].append(num)
new_implicants = {}
new_num1s_dict = {}
prime_dict = {mt: True for mt in sorted(terms)}
cur_num1s, max_num1s = min(num1s_dict.keys()), max(num1s_dict.keys())
while cur_num1s < max_num1s:
if cur_num1s in num1s_dict and (cur_num1s + 1) in num1s_dict:
for cur_term in sorted(num1s_dict[cur_num1s]):
for next_term in sorted(num1s_dict[cur_num1s + 1]):
if isinstance(cur_term, int):
diff_mask = dc_mask = cur_term ^ next_term
implicant_mask = cur_term & next_term
elif isinstance(cur_term, tuple):
if terms[cur_term][1] == terms[next_term][1]:
diff_mask = terms[cur_term][0] ^ terms[next_term][0]
dc_mask = diff_mask | terms[cur_term][1]
implicant_mask = terms[cur_term][0] & terms[next_term][0]
else:
continue
else:
raise AquaError('Unexpected type: {}.'.format(type(cur_term)))
if bin(diff_mask).count('1') == 1:
prime_dict[cur_term] = False
prime_dict[next_term] = False
if isinstance(cur_term, int):
cur_implicant = (cur_term, next_term)
elif isinstance(cur_term, tuple):
cur_implicant = tuple(sorted((*cur_term, *next_term)))
else:
raise AquaError('Unexpected type: {}.'.format(type(cur_term)))
new_implicants[cur_implicant] = (
implicant_mask,
dc_mask
)
num1s = bin(implicant_mask).count('1')
if num1s not in new_num1s_dict:
new_num1s_dict[num1s] = [cur_implicant]
else:
if cur_implicant not in new_num1s_dict[num1s]:
new_num1s_dict[num1s].append(cur_implicant)
cur_num1s += 1
return new_implicants, new_num1s_dict, prime_dict
terms = ones + dcs
cur_num1s_dict = None
prime_implicants = []
while True:
next_implicants, next_num1s_dict, cur_prime_dict = combine_terms(terms,
num1s_dict=cur_num1s_dict)
for implicant in cur_prime_dict:
if cur_prime_dict[implicant]:
if isinstance(implicant, int):
if implicant not in dcs:
prime_implicants.append((implicant,))
else:
if not set.issubset(set(implicant), dcs):
prime_implicants.append(implicant)
if next_implicants:
terms = next_implicants
cur_num1s_dict = next_num1s_dict
else:
break
return prime_implicants
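# Hedged worked example (added for illustration): for a three-variable function with
# onset {0, 1, 2, 5, 6, 7} and no don't-cares, each adjacent pair of minterms merges
# once but no group of four forms, so get_prime_implicants(ones=[0, 1, 2, 5, 6, 7],
# dcs=[]) should return the six pair implicants
# (0, 1), (0, 2), (1, 5), (2, 6), (5, 7) and (6, 7), up to ordering.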
def get_exact_covers(cols, rows, num_cols=None):
"""
Use Algorithm X to get all solutions to the exact cover problem
https://en.wikipedia.org/wiki/Knuth%27s_Algorithm_X
Args:
cols (list[int]): A list of integers representing the columns to be covered
rows (list[list[int]]): A list of lists of integers representing the rows
num_cols (int): The total number of columns
Returns:
list: All exact covers
"""
if num_cols is None:
num_cols = max(cols) + 1
ec = DLX([(c, 0 if c in cols else 1) for c in range(num_cols)])
ec.appendRows([[c] for c in cols])
ec.appendRows(rows)
exact_covers = []
for s in ec.solve():
cover = []
for i in s:
cover.append(ec.getRowList(i))
exact_covers.append(cover)
return exact_covers
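
# --- Hedged usage sketch (added for illustration; not part of the original file). ---
# The '0110' bitmap is the two-variable XOR from the class docstring; the __main__
# guard is an assumption, and running it needs the qiskit-aqua and dlx dependencies
# imported at the top of this module.
if __name__ == '__main__':
    oracle = TruthTableOracle('0110', optimization=False, mct_mode='basic')
    # Two variable qubits and one output qubit for n=2, m=1.
    print(oracle.variable_register, oracle.output_register)
    print(oracle.construct_circuit())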
|
the-stack_106_16459
|
from dart.client.python.dart_client import Dart
from dart.model.datastore import Datastore, DatastoreState
if __name__ == '__main__':
dart = Dart('localhost', 5000)
assert isinstance(dart, Dart)
datastore = dart.get_datastore('KNMUGQWTHT')
assert isinstance(datastore, Datastore)
datastore.data.state = DatastoreState.ACTIVE
dart.save_datastore(datastore)
|
the-stack_106_16460
|
from typing import Any
from pandas_ml import ConfusionMatrix
import sklearn.metrics as metrics
from tabulate import tabulate
from collections import defaultdict
from expanded_checklist.checklist.utils import \
DataShape, ACCUMULATED_STR, FlattenGroup
from ..abstract_tests import ClassificationMetric
class BasicClassificationMetrics(ClassificationMetric):
def __init__(self) -> None:
"""
The test that calculates a number of different classic metrics for
classification -- the results from this test can be further used to
calculate other metrics (e.g. see equality difference).
"""
super().__init__('class_metrics', required_ds=DataShape.GROUPED)
def get_binary_class_results(self, labels, preds, confs):
# get all metrics from pandas ml confusion matrix
cm = ConfusionMatrix(labels, preds)
stats = cm.stats()
stats["roc_auc"] = metrics.roc_auc_score(labels, preds)
stats["F1"] = stats["F1_score"]
return stats
def cross_class_accumulation(
self, class_results, labels, preds, confs
) -> Any:
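        # Macro-average the per-class TPR / precision / F1 (unweighted mean over the
        # classes present in class_results) and add overall accuracy on the raw labels.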
to_accumulate = ["TPR", "PPV", "F1_score"]
accumulated = defaultdict(list)
for cl in range(self.n_classes):
if cl not in class_results:
continue
res = class_results[cl]
for key in to_accumulate:
if key not in res:
continue
accumulated[key].append(res[key])
to_ret = {}
for key, scores in accumulated.items():
to_ret[f"macro_{key}"] = sum(scores)/len(scores)
# calculate accuracy on the original labels and preds (all classes)
to_ret["Accuracy"] = metrics.accuracy_score(labels, preds)
return to_ret
def print_for_binary_class(self, results, cl):
table = [["Groups:"] + sorted(results.keys())]
        # Named metric_names so it does not shadow the imported sklearn.metrics module.
        metric_names = None
        for gname, stats in results.items():
            # e.g. accumulated might not always appear in the results
            if cl not in stats:
                return
            metric_names = stats[cl].keys()
            break
        for key in sorted(metric_names):
row = [f"{key}:"]
for gname in sorted(results.keys()):
if cl not in results[gname] or key not in results[gname][cl]:
continue
row.append(f"{results[gname][cl][key]:.3f}")
table.append(row)
if len(table) > 1:
print(f"======== CLASS {cl}")
print(tabulate(table))
def summary(self, core_record, res_munch, verbose=False, **kwargs):
results = res_munch.results
n_examples = res_munch.n_examples
print(f"Examples used for evaluation: {n_examples}")
if self.n_classes == 2:
self.print_for_binary_class(results, 1)
else:
print("==== Results accumulates across the classes")
self.print_for_binary_class(results, ACCUMULATED_STR)
print("==== Results for one class vs other")
for i in range(self.n_classes):
self.print_for_binary_class(results, i)
|
the-stack_106_16461
|
__author__ = 'heroico'
import sqlite3
import numpy
from .. import KeyedDataSet
from .. import Exceptions
import os
class DBLoaders(object):
@classmethod
def loadKeyedDataSetFromDB(cls, db_path, table_name, key_column, value_column):
if not os.path.exists(db_path):
raise Exceptions.BadFilename(db_path)
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
query = "SELECT "+key_column+", "+value_column+" FROM " + table_name
try:
entries = cursor.execute(query)
except sqlite3.Error as e:
raise Exceptions.InvalidDbFormat(db_path, e.args[0])
rsids = []
variances = []
for entry in entries:
rsids.append(entry[0])
variances.append(entry[1])
keyed_data_set = KeyedDataSet.KeyedDataSet(db_path, None, variances, rsids)
return keyed_data_set
@classmethod
def loadVariancesFromDB(cls, db_path):
keyed_data_set = cls.loadKeyedDataSetFromDB(db_path, "variances", "rsid", "var")
return keyed_data_set
@classmethod
def loadCovarianceMatrix(cls, db_path, keys):
if not os.path.exists(db_path):
raise Exceptions.BadFilename(db_path)
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
key_filter = {}
valid_keys = [] # list of valid rsid 1s
values = {} # Lookup for all observed pairwise covariances
# rsx=>{rsy=>X,},
        def get_row(d, key):
            # Return the inner dict for `key`, creating it on first access.
            row = None
            if key in d:
                row = d[key]
            else:
                row = {}
                d[key] = row
            return row
cursor.execute("SELECT rsid1, rsid2, covariance FROM covariances")
results = cursor.fetchall()
for result in results:
rsid1 = result[0]
rsid2 = result[1]
value = result[2]
if value == "NA":
continue
if not rsid1 in key_filter:
key_filter[rsid1] = True
valid_keys.append(rsid1)
value = float(value)
row_1 = get_row(values, rsid1)
row_1[rsid2] = value
row_2 = get_row(values, rsid2)
row_2[rsid1] = value
connection.close()
valid_rows = []
for i in range(0, len(valid_keys)):
valid_row = []
valid_rows.append(valid_row)
for j in range(0, len(valid_keys)):
key_i = valid_keys[i]
key_j = valid_keys[j]
value = values[key_i][key_j]
valid_row.append(value)
covariance_matrix = numpy.array(valid_rows)
return covariance_matrix, valid_keys
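
# Hedged usage sketch (added for illustration): "covariance.db" is a made-up path to
# a SQLite file holding the tables this loader expects -- `variances` with rsid/var
# columns and `covariances` with rsid1/rsid2/covariance columns.
#
#     variances = DBLoaders.loadVariancesFromDB("covariance.db")
#     matrix, rsids = DBLoaders.loadCovarianceMatrix("covariance.db", keys=None)
#     print(matrix.shape, len(rsids))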
|
the-stack_106_16462
|
# -*- coding: utf-8 -*-
import torch.nn as nn
import torch
# from pytorch_transformers import XLNetConfig, XLNetModel # old-version
# from transformers import XLNetConfig, XLNetModel, XLNetTokenizer
from transformers import BertTokenizer, BertModel
class Encoder_BERT(nn.Module):
def __init__(self, config, x_embed):
super().__init__()
pretrained_weights = "bert-base-uncased"
# tokenizer = BertTokenizer.from_pretrained(pretrained_weights, do_lower_case=True)
self.model = BertModel.from_pretrained(pretrained_weights)
# self.pretrained_config = XLNetConfig.from_pretrained(pretrained_weights)
# if config.use_gpu:
# self.model = self.model.to(device=torch.device("cuda"))
# if config.use_parallel:
# self.model = torch.nn.DataParallel(self.model)
self.encoder_out_size = 768
return
# end __init__
#
def forward(self, text_inputs, mask_input, len_seq, mode=""):
encoder_out = []
self.model.eval()
with torch.no_grad():
# print(text_inputs)
# encoder_out = self.model(text_inputs, None, mask_input)[0] ## should be tested with input-mask
# encoder_out = self.model(text_inputs, None)[0] ## should be tested with input-mask
# encoder_out = self.model(text_inputs, None, None, attention_mask=mask_input)[0]
encoder_out = self.model(text_inputs, attention_mask=mask_input)[0]
## input_mask: torch.FloatTensor of shape (batch_size, seq_len)
encoder_out = encoder_out * mask_input.unsqueeze(2)
return encoder_out
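
# --- Hedged usage sketch (added for illustration; not part of the original file). ---
# `config` and `x_embed` are unused by this encoder, so None placeholders are passed;
# the sample sentences and the __main__ guard are assumptions, and a transformers
# version with a callable tokenizer is assumed.
if __name__ == "__main__":
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
    batch = tokenizer(["a short essay", "another longer response"],
                      padding=True, return_tensors="pt")
    encoder = Encoder_BERT(config=None, x_embed=None)
    # (batch_size, seq_len, 768) token representations, zeroed at padding positions.
    out = encoder(batch["input_ids"], batch["attention_mask"], len_seq=None)
    print(out.shape)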
|
the-stack_106_16465
|
# Import the WebIDL module, so we can do isinstance checks and whatnot
import WebIDL
def WebIDLTest(parser, harness):
# Basic functionality
threw = False
try:
parser.parse("""
A implements B;
interface B {
attribute long x;
};
interface A {
attribute long y;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(not threw, "Should not have thrown on implements statement "
"before interfaces")
harness.check(len(results), 3, "We have three statements")
harness.ok(isinstance(results[1], WebIDL.IDLInterface), "B is an interface")
harness.check(len(results[1].members), 1, "B has one member")
A = results[2]
harness.ok(isinstance(A, WebIDL.IDLInterface), "A is an interface")
harness.check(len(A.members), 2, "A has two members")
harness.check(A.members[0].identifier.name, "y", "First member is 'y'")
harness.check(A.members[1].identifier.name, "x", "Second member is 'x'")
# Duplicated member names not allowed
threw = False
try:
parser.parse("""
C implements D;
interface D {
attribute long x;
};
interface C {
attribute long x;
};
""")
parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown on implemented interface duplicating "
"a name on base interface")
# Same, but duplicated across implemented interfaces
threw = False
try:
parser.parse("""
E implements F;
E implements G;
interface F {
attribute long x;
};
interface G {
attribute long x;
};
interface E {};
""")
parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown on implemented interfaces "
"duplicating each other's member names")
# Same, but duplicated across indirectly implemented interfaces
threw = False
try:
parser.parse("""
H implements I;
H implements J;
I implements K;
interface K {
attribute long x;
};
interface L {
attribute long x;
};
interface I {};
interface J : L {};
interface H {};
""")
parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown on indirectly implemented interfaces "
"duplicating each other's member names")
# Same, but duplicated across an implemented interface and its parent
threw = False
try:
parser.parse("""
M implements N;
interface O {
attribute long x;
};
interface N : O {
attribute long x;
};
interface M {};
""")
parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown on implemented interface and its "
"ancestor duplicating member names")
# Reset the parser so we can actually find things where we expect
# them in the list
parser = parser.reset()
# Diamonds should be allowed
threw = False
try:
parser.parse("""
P implements Q;
P implements R;
Q implements S;
R implements S;
interface Q {};
interface R {};
interface S {
attribute long x;
};
interface P {};
""")
results = parser.finish()
except:
threw = True
harness.ok(not threw, "Diamond inheritance is fine")
harness.check(results[6].identifier.name, "S", "We should be looking at 'S'")
harness.check(len(results[6].members), 1, "S should have one member")
harness.check(results[6].members[0].identifier.name, "x",
"S's member should be 'x'")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface {
};
callback interface TestCallbackInterface {
};
TestInterface implements TestCallbackInterface;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should not allow callback interfaces on the right-hand side "
"of 'implements'")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface {
};
callback interface TestCallbackInterface {
};
TestCallbackInterface implements TestInterface;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should not allow callback interfaces on the left-hand side of "
"'implements'")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface {
};
dictionary Dict {
};
Dict implements TestInterface;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should not allow non-interfaces on the left-hand side "
"of 'implements'")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface {
};
dictionary Dict {
};
TestInterface implements Dict;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should not allow non-interfaces on the right-hand side "
"of 'implements'")
|
the-stack_106_16466
|
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
import gpflow
from gpflow import default_float
from gpflow.base import PriorOn
from gpflow.config import set_default_float
from gpflow.utilities import to_default_float
from tensorflow_probability.python.bijectors import Exp
from tensorflow_probability.python.distributions import Uniform, Gamma
np.random.seed(1)
def build_data():
N = 30
X = np.random.rand(N, 1)
Y = np.sin(12 * X) + 0.66 * np.cos(25 * X) + np.random.randn(N, 1) * 0.1 + 3
return (X, Y)
def build_model(data):
kernel = gpflow.kernels.Matern52(lengthscales=0.3)
meanf = gpflow.mean_functions.Linear(1.0, 0.0)
model = gpflow.models.GPR(data, kernel, meanf)
model.likelihood.variance.assign(0.01)
for p in model.parameters:
p.prior = Gamma(to_default_float(1.0), to_default_float(1.0))
return model
def test_mcmc_helper_parameters():
data = build_data()
model = build_model(data)
hmc_helper = gpflow.optimizers.SamplingHelper(
model.log_marginal_likelihood, model.trainable_parameters
)
for i in range(len(model.trainable_parameters)):
assert model.trainable_parameters[i].shape == hmc_helper.current_state[i].shape
assert model.trainable_parameters[i] == hmc_helper._parameters[i]
if isinstance(model.trainable_parameters[i], gpflow.Parameter):
assert (
model.trainable_parameters[i].unconstrained_variable == hmc_helper.current_state[i]
)
def test_mcmc_helper_target_function_constrained():
""" Set up priors on the model parameters such that we can
readily compute their expected values. """
data = build_data()
model = build_model(data)
prior_width = 200.0
hmc_helper = gpflow.optimizers.SamplingHelper(
model.log_marginal_likelihood, model.trainable_parameters
)
target_log_prob_fn = hmc_helper.target_log_prob_fn
# Priors which are set on the constrained space
expected_log_prior = 0.0
for param in model.trainable_parameters:
if param.value() < 1e-3:
# Avoid values which would be pathological for the Exp transform
param.assign(1.0)
param.transform = Exp()
low_value = -100
high_value = low_value + prior_width
param.prior = Uniform(low=np.float64(low_value), high=np.float64(high_value))
param.prior_on = PriorOn.CONSTRAINED
prior_density_on_constrained = 1 / prior_width
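        # Change of variables: with the Exp transform c = exp(u), |dc/du| = c, so the
        # constrained density picks up a factor of param.value() on the unconstrained space.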
prior_density_on_unconstrained = prior_density_on_constrained * param.value()
expected_log_prior += np.log(prior_density_on_unconstrained)
log_likelihood = model.log_likelihood().numpy()
expected_log_prob = log_likelihood + expected_log_prior
np.testing.assert_allclose(target_log_prob_fn(), expected_log_prob)
def test_mcmc_helper_target_function_unconstrained():
""" Verifies the objective for a set of priors which are defined on the unconstrained space.
"""
data = build_data()
model = build_model(data)
# Set up priors on the model parameters such that we can readily compute their expected values.
expected_log_prior = 0.0
prior_width = 200.0
hmc_helper = gpflow.optimizers.SamplingHelper(
model.log_marginal_likelihood, model.trainable_parameters
)
for param in model.trainable_parameters:
low_value = -100
high_value = low_value + prior_width
param.prior = Uniform(low=np.float64(low_value), high=np.float64(high_value))
param.prior_on = "unconstrained"
prior_density = 1 / prior_width
expected_log_prior += np.log(prior_density)
target_log_prob_fn = hmc_helper.target_log_prob_fn
expected_log_prob = model.log_likelihood().numpy() + expected_log_prior
np.testing.assert_allclose(target_log_prob_fn(), expected_log_prob)
@pytest.mark.parametrize("prior_on", ["constrained", "unconstrained"])
def test_mcmc_helper_target_function_no_transforms(prior_on):
""" Verifies the objective for a set of priors where no transforms are set.
"""
data = build_data()
model = build_model(data)
expected_log_prior = 0.0
prior_width = 200.0
hmc_helper = gpflow.optimizers.SamplingHelper(
model.log_marginal_likelihood, model.trainable_parameters
)
for param in model.trainable_parameters:
param.transform = None
low_value = -100
high_value = low_value + prior_width
param.prior = Uniform(low=np.float64(low_value), high=np.float64(high_value))
param.prior_on = prior_on
prior_density = 1 / prior_width
expected_log_prior += np.log(prior_density)
log_likelihood = model.log_likelihood().numpy()
expected_log_prob = log_likelihood + expected_log_prior
target_log_prob_fn = hmc_helper.target_log_prob_fn
np.testing.assert_allclose(target_log_prob_fn(), expected_log_prob)
# Test the wrapped closure
log_prob, grad_fn = target_log_prob_fn.__original_wrapped__()
grad, nones = grad_fn(1, [None] * len(model.trainable_parameters))
assert len(grad) == len(model.trainable_parameters)
assert nones == [None] * len(model.trainable_parameters)
def test_mcmc_sampler_integration():
data = build_data()
model = build_model(data)
hmc_helper = gpflow.optimizers.SamplingHelper(
model.log_marginal_likelihood, model.trainable_parameters
)
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=hmc_helper.target_log_prob_fn, num_leapfrog_steps=2, step_size=0.01,
)
adaptive_hmc = tfp.mcmc.SimpleStepSizeAdaptation(
hmc,
num_adaptation_steps=2,
target_accept_prob=gpflow.utilities.to_default_float(0.75),
adaptation_rate=0.1,
)
num_samples = 5
@tf.function
def run_chain_fn():
return tfp.mcmc.sample_chain(
num_results=num_samples,
num_burnin_steps=2,
current_state=hmc_helper.current_state,
kernel=adaptive_hmc,
trace_fn=lambda _, pkr: pkr.inner_results.is_accepted,
)
samples, _ = run_chain_fn()
assert len(samples) == len(model.trainable_parameters)
parameter_samples = hmc_helper.convert_to_constrained_values(samples)
assert len(parameter_samples) == len(samples)
for i in range(len(model.trainable_parameters)):
assert len(samples[i]) == num_samples
assert hmc_helper.current_state[i].numpy() == samples[i][-1]
assert hmc_helper._parameters[i].numpy() == parameter_samples[i][-1]
@pytest.mark.xfail(raises=ValueError)
def test_helper_with_variables_fails():
variable = tf.Variable(0.1)
gpflow.optimizers.SamplingHelper(lambda: variable ** 2, (variable,))
|
the-stack_106_16467
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Apple's JetStream benchmark.
JetStream combines a variety of JavaScript benchmarks, covering a variety of
advanced workloads and programming techniques, and reports a single score that
balances them using geometric mean.
Each benchmark measures a distinct workload, and no single optimization
technique is sufficient to speed up all benchmarks. Latency tests measure that
a web application can start up quickly, ramp up to peak performance quickly,
and run smoothly without interruptions. Throughput tests measure the sustained
peak performance of a web application, ignoring ramp-up time and spikes in
smoothness. Some benchmarks demonstrate trade-offs, and aggressive or
specialized optimization for one benchmark might make another benchmark slower.
"""
import json
import os
from core import perf_benchmark
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.page import legacy_page_test
from telemetry import story
from telemetry.util import statistics
from telemetry.value import list_of_scalar_values
class _JetstreamMeasurement(legacy_page_test.LegacyPageTest):
def __init__(self):
super(_JetstreamMeasurement, self).__init__()
def WillNavigateToPage(self, page, tab):
page.script_to_evaluate_on_commit = """
var __results = [];
var __real_log = window.console.log;
window.console.log = function() {
__results.push(Array.prototype.join.call(arguments, ' '));
__real_log.apply(this, arguments);
}
"""
def ValidateAndMeasurePage(self, page, tab, results):
get_results_js = """
(function() {
for (var i = 0; i < __results.length; i++) {
if (!__results[i].indexOf('Raw results: ')) return __results[i];
}
return null;
})();
"""
tab.WaitForDocumentReadyStateToBeComplete()
tab.EvaluateJavaScript('JetStream.start()')
tab.WaitForJavaScriptExpression(get_results_js, 600)
result = tab.EvaluateJavaScript(get_results_js)
result = json.loads(result.partition(': ')[2])
all_score_lists = []
for k, v in result.iteritems():
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, k.replace('.', '_'), 'score', v['result'],
important=False))
# Collect all test scores to compute geometric mean.
for i, score in enumerate(v['result']):
if len(all_score_lists) <= i:
all_score_lists.append([])
all_score_lists[i].append(score)
all_scores = []
for score_list in all_score_lists:
all_scores.append(statistics.GeometricMean(score_list))
results.AddSummaryValue(list_of_scalar_values.ListOfScalarValues(
None, 'Score', 'score', all_scores))
@benchmark.Disabled('android')
class Jetstream(perf_benchmark.PerfBenchmark):
test = _JetstreamMeasurement
@classmethod
def Name(cls):
return 'jetstream'
def CreateStorySet(self, options):
ps = story.StorySet(
archive_data_file='../page_sets/data/jetstream.json',
base_dir=os.path.dirname(os.path.abspath(__file__)),
cloud_storage_bucket=story.INTERNAL_BUCKET)
ps.AddStory(page_module.Page(
'http://browserbench.org/JetStream/', ps, ps.base_dir,
make_javascript_deterministic=False))
return ps
|
the-stack_106_16468
|
# 1047 Game time with minutes 14/04/2020
entrada = input().split()
a, b, c, d = entrada
a = int(a)
b = int(b)
c = int(c)
d = int(d)
inicio = a * 60 + b
fim = c * 60 + d
if inicio < fim:
hora = (fim - inicio) // 60
min = (fim - inicio) % 60
print('O JOGO DUROU {} HORA(S) E {} MINUTO(S)'.format(hora, min))
elif inicio > fim:
hora = (24 * 60 - inicio + fim) // 60
min = (24 * 60 - inicio + fim) % 60
print('O JOGO DUROU {} HORA(S) E {} MINUTO(S)'.format(hora, min))
else:
print('O JOGO DUROU 24 HORA(S) E 0 MINUTO(S)')
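
# Hedged worked example (added for illustration): for the input "23 30 1 15" the start
# is 23*60+30 = 1410 and the end is 1*60+15 = 75; the match crosses midnight, so
# 24*60 - 1410 + 75 = 105 minutes, printed as 1 HORA(S) E 45 MINUTO(S).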
|
the-stack_106_16469
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMpi4py(PythonPackage):
"""This package provides Python bindings for the Message Passing
Interface (MPI) standard. It is implemented on top of the
MPI-1/MPI-2 specification and exposes an API which grounds on the
standard MPI-2 C++ bindings.
"""
homepage = "https://pypi.python.org/pypi/mpi4py"
url = "https://pypi.io/packages/source/m/mpi4py/mpi4py-3.0.3.tar.gz"
git = "https://github.com/mpi4py/mpi4py.git"
version('develop', branch='master')
version('3.0.3', sha256='012d716c8b9ed1e513fcc4b18e5af16a8791f51e6d1716baccf988ad355c5a1f')
version('3.0.1', sha256='6549a5b81931303baf6600fa2e3bc04d8bd1d5c82f3c21379d0d64a9abcca851')
version('3.0.0', sha256='b457b02d85bdd9a4775a097fac5234a20397b43e073f14d9e29b6cd78c68efd7')
version('2.0.0', sha256='6543a05851a7aa1e6d165e673d422ba24e45c41e4221f0993fe1e5924a00cb81')
version('1.3.1', sha256='e7bd2044aaac5a6ea87a87b2ecc73b310bb6efe5026031e33067ea3c2efc3507')
depends_on('[email protected]:2.7.99,3.2:')
depends_on('py-setuptools', type='build')
depends_on('mpi')
depends_on('[email protected]:', when='@develop', type='build')
def build_args(self, spec, prefix):
return ['--mpicc=%s -shared' % spec['mpi'].mpicc]
def setup_build_environment(self, env):
# Python is not built with NVHPC, but the compiler flags that were used
# to build Python are inherited by the build of py-mpi4py and passed to
# NVHPC. This can lead to errors, but by injecting this extra flag we
# can demote those errors to warnings.
if self.spec.compiler.name == 'nvhpc':
env.append_flags('SPACK_CFLAGS', '-noswitcherror')
env.append_flags('SPACK_CXXFLAGS', '-noswitcherror')
@property
def headers(self):
headers = find_all_headers(self.prefix.lib)
return headers
|
the-stack_106_16470
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetModelDeploymentMonitoringJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_async]
from google.cloud import aiplatform_v1beta1
async def sample_get_model_deployment_monitoring_job():
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest(
name="name_value",
)
# Make the request
response = await client.get_model_deployment_monitoring_job(request=request)
# Handle the response
print(response)
# [END aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_async]
|
the-stack_106_16471
|
import dask
import dask.dataframe as dd
import json
import pandas as pd
import numpy as np
import os.path
import csv
import boto3
from dask.distributed import Client
import time
def load_dataset(client, data_dir, s3_bucket, nbytes, npartitions):
num_bytes_per_partition = nbytes // npartitions
filenames = []
@dask.delayed
def generate_s3_file(i, data_dir, s3_bucket):
s3 = boto3.client("s3")
key = "df-{}-{}.parquet.gzip".format(num_bytes_per_partition, i)
contents = s3.list_objects(Bucket=s3_bucket, Prefix=key)
for obj in contents.get("Contents", []):
if obj["Key"] == key:
print(f"S3 partition {i} exists")
return
filename = os.path.join(data_dir, key)
if not os.path.exists(filename):
print("Generating partition", filename)
nrows = num_bytes_per_partition // 8
dataset = pd.DataFrame(
np.random.randint(
0, np.iinfo(np.int64).max, size=(nrows, 1),
dtype=np.int64),
columns=["a"])
dataset.to_parquet(filename, compression="gzip")
print("Writing partition to S3", filename)
with open(filename, "rb") as f:
s3.put_object(Bucket=s3_bucket, Key=key, Body=f)
x = []
for i in range(npartitions):
x.append(generate_s3_file(i, data_dir, s3_bucket))
dask.compute(x)
filenames = [
f"s3://{s3_bucket}/df-{num_bytes_per_partition}-{i}.parquet.gzip"
for i in range(npartitions)
]
df = dd.read_parquet(filenames)
return df
def load_dataset_files(client, data_dir, file_path, nbytes, npartitions):
num_bytes_per_partition = nbytes // npartitions
filenames = []
@dask.delayed
def generate_file(i, data_dir, file_path):
key = "{}/df-{}-{}.parquet.gzip".format(file_path,
num_bytes_per_partition, i)
from os import path
if path.exists(key):
print(f"The file {key} already exists. Do nothing")
return
filename = os.path.join(data_dir, key)
if not os.path.exists(filename):
print("Generating partition", filename)
nrows = num_bytes_per_partition // 8
dataset = pd.DataFrame(
np.random.randint(
0, np.iinfo(np.int64).max, size=(nrows, 1),
dtype=np.int64),
columns=["a"])
dataset.to_parquet(filename, compression="gzip")
print("Writing partition to a file", filename)
x = []
for i in range(npartitions):
x.append(generate_file(i, data_dir, file_path))
dask.compute(x)
filenames = [
f"{file_path}/df-{num_bytes_per_partition}-{i}.parquet.gzip"
for i in range(npartitions)
]
df = dd.read_parquet(filenames)
return df
def trial(client,
data_dir,
nbytes,
n_partitions,
generate_only,
s3_bucket=None,
file_path=None):
if s3_bucket:
df = load_dataset(client, data_dir, s3_bucket, nbytes, n_partitions)
elif file_path:
df = load_dataset_files(client, data_dir, file_path, nbytes,
n_partitions)
if generate_only:
return []
times = []
start = time.time()
for i in range(10):
print("Trial {} start".format(i))
trial_start = time.time()
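        # set_index on the random column forces a full shuffle of the DataFrame;
        # head() materialises the result so the shuffle actually executes.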
print(
df.set_index("a", shuffle="tasks", max_branch=float("inf")).head(
10, npartitions=-1))
trial_end = time.time()
duration = trial_end - trial_start
times.append(duration)
print("Trial {} done after {}".format(i, duration))
if time.time() - start > 60:
break
return times
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--nbytes", type=int, default=1_000_000)
parser.add_argument("--npartitions", type=int, default=100, required=False)
# Max partition size is 1GB.
parser.add_argument(
"--max-partition-size", type=int, default=1000_000_000, required=False)
parser.add_argument("--num-nodes", type=int, default=1)
parser.add_argument("--dask-tasks", action="store_true")
parser.add_argument("--generate-only", action="store_true")
parser.add_argument("--ray", action="store_true")
parser.add_argument("--data-dir", default="/home/ubuntu/dask-benchmarks")
parser.add_argument("--s3-bucket")
parser.add_argument("--file-path")
parser.add_argument("--dask-nprocs", type=int, default=0)
parser.add_argument("--dask-nthreads", type=int, default=0)
parser.add_argument("--dask-memlimit", type=int, default=0)
args = parser.parse_args()
assert not (args.s3_bucket
and args.file_path), "Provide S3 bucket or file path."
if args.ray:
import ray
ray.init(address="auto")
from ray.util.dask import ray_dask_get, dataframe_optimize
dask.config.set(
scheduler=ray_dask_get, dataframe_optimize=dataframe_optimize)
client = None
else:
assert args.dask_nprocs != -0
assert args.dask_nthreads != -0
assert args.dask_memlimit != -0
if args.dask_tasks:
print("Using task-based Dask shuffle")
dask.config.set(shuffle="tasks")
else:
print("Using disk-based Dask shuffle")
client = Client("localhost:8786")
print(
trial(
client,
args.data_dir,
1000,
10,
args.generate_only,
s3_bucket=args.s3_bucket,
file_path=args.file_path))
print("WARMUP DONE")
npartitions = args.npartitions
if args.nbytes // npartitions > args.max_partition_size:
npartitions = args.nbytes // args.max_partition_size
duration = []
output = trial(
client,
args.data_dir,
args.nbytes,
npartitions,
args.generate_only,
s3_bucket=args.s3_bucket,
file_path=args.file_path)
print("mean over {} trials: {} +- {}".format(
len(output), np.mean(output), np.std(output)))
    if args.ray:
        print(ray.internal.internal_api.memory_summary(stats_only=True))
duration = np.mean(output)
with open(os.environ["TEST_OUTPUT_JSON"], "w") as f:
f.write(json.dumps({"duration": duration, "success": 1}))
write_header = not os.path.exists("output.csv") or os.path.getsize(
"output.csv") == 0
with open("output.csv", "a+") as csvfile:
fieldnames = [
"num_nodes", "nbytes", "npartitions", "dask_tasks", "dask_nprocs",
"dask_nthreads", "dask_memlimit", "duration"
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
if write_header:
writer.writeheader()
row = {
"num_nodes": args.num_nodes,
"nbytes": args.nbytes,
"npartitions": npartitions,
"dask_tasks": args.dask_tasks,
"dask_nprocs": args.dask_nprocs,
"dask_nthreads": args.dask_nthreads,
"dask_memlimit": args.dask_memlimit,
}
        for trial_duration in output:
            row["duration"] = trial_duration
            writer.writerow(row)
|
the-stack_106_16476
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ct', '0027_auto_20180904_1112'),
]
operations = [
migrations.AddField(
model_name='lesson',
name='mc_simplified',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='lesson',
name='add_unit_aborts',
field=models.BooleanField(default=False),
),
]
|
the-stack_106_16478
|
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
def proper_noun_extractor(text, noun_limit=2):
out = ''
words = nltk.word_tokenize(text)
words = [word for word in words if word not in set(stopwords.words('english'))]
tagged = nltk.pos_tag(words)
for (word, tag) in tagged[:noun_limit + 1]:
if tag == 'NNP':
out += word + ' '
return out.strip()
|
the-stack_106_16479
|
from Instrucciones.Identificador import Identificador
from Instrucciones.TablaSimbolos.Tipo import Tipo, Tipo_Dato
from Instrucciones.Expresiones.Primitivo import Primitivo
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Excepcion import *
import hashlib
class Md5(Instruccion):
def __init__(self, valor, tipo, linea, columna):
Instruccion.__init__(self,tipo,linea,columna)
self.valor = valor
self.tipo = tipo
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
resultado = self.valor.ejecutar(tabla,arbol)
if isinstance(resultado, Excepcion):
return resultado
if self.valor.tipo.tipo== Tipo_Dato.CHAR or self.valor.tipo.tipo== Tipo_Dato.VARCHAR or self.valor.tipo.tipo== Tipo_Dato.VARYING or self.valor.tipo.tipo== Tipo_Dato.CHARACTER or self.valor.tipo.tipo== Tipo_Dato.TEXT:
self.tipo= Tipo(Tipo_Dato.TEXT)
return hashlib.md5(str(resultado).encode("utf-8")).hexdigest()
        error = Excepcion('42883',"Semantic",f"The function MD5({self.valor.tipo.toString()}) does not exist",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append("HINT: Ninguna función coincide en el nombre y tipos de argumentos. Puede ser necesario agregar conversión explícita de tipos.")
arbol.consola.append(error.toString())
return error
'''
instruccion = Md5("hola mundo",None, 1,2)
instruccion.ejecutar(None,None)
'''
|
the-stack_106_16480
|
import yaml
import os
import urllib.request
import ssl
import certifi
import pdfkit
import html2text
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import HTMLConverter
from pdfminer.layout import LAParams
from pycti import OpenCTIConnectorHelper, get_config_variable
class ImportExternalReferenceConnector:
def __init__(self):
# Instantiate the connector helper from config
config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/config.yml"
config = (
yaml.load(open(config_file_path), Loader=yaml.FullLoader)
if os.path.isfile(config_file_path)
else {}
)
self.helper = OpenCTIConnectorHelper(config)
self.import_as_pdf = get_config_variable(
"IMPORT_EXTERNAL_REFERENCE_IMPORT_AS_PDF",
["import_external_reference", "import_as_pdf"],
config,
False,
True,
)
self.import_as_md = get_config_variable(
"IMPORT_EXTERNAL_REFERENCE_IMPORT_AS_MD",
["import_external_reference", "import_as_md"],
config,
False,
True,
)
self.import_pdf_as_md = get_config_variable(
"IMPORT_EXTERNAL_REFERENCE_IMPORT_PDF_AS_MD",
["import_external_reference", "import_pdf_as_md"],
config,
False,
True,
)
self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64)"}
def delete_files(self):
if os.path.exists("data.html"):
os.remove("data.html")
if os.path.exists("data.pdf"):
os.remove("data.pdf")
def _process_external_reference(self, external_reference):
if "url" not in external_reference:
raise ValueError("No URL in this external reference, doing nothing")
url_to_import = external_reference["url"].strip("/")
# If the URL is a PDF file, just download it
if self.import_as_pdf:
try:
if url_to_import.endswith(".pdf"):
# Download file
file_name = url_to_import.split("/")[-1]
req = urllib.request.Request(url_to_import, headers=self.headers)
response = urllib.request.urlopen(
req, context=ssl.create_default_context(cafile=certifi.where())
)
data = response.read()
self.helper.api.external_reference.add_file(
id=external_reference["id"],
file_name=file_name,
data=data,
mime_type="application/pdf",
)
else:
try:
file_name = url_to_import.split("/")[-1] + ".pdf"
options = {
"javascript-delay": 10000,
"load-error-handling": "skip",
"custom-header": [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64)",
),
],
}
data = pdfkit.from_url(url_to_import, False, options=options)
self.helper.api.external_reference.add_file(
id=external_reference["id"],
file_name=file_name,
data=data,
mime_type="application/pdf",
)
except OSError as e:
if "Done" not in str(e):
raise e
except Exception as e:
self.helper.log_error(e)
if self.import_as_md:
if url_to_import.endswith(".pdf") and self.import_pdf_as_md:
try:
urllib.request.urlretrieve(url_to_import, "./data.pdf")
outfp = open("./data.html", "w", encoding="utf-8")
rsrcmgr = PDFResourceManager(caching=True)
device = HTMLConverter(
rsrcmgr,
outfp,
scale=1,
layoutmode="normal",
laparams=LAParams(),
imagewriter=None,
debug=False,
)
interpreter = PDFPageInterpreter(rsrcmgr, device)
with open("./data.pdf", "rb") as fp:
for page in PDFPage.get_pages(
fp,
set(),
maxpages=0,
password=b"",
caching=False,
check_extractable=True,
):
page.rotate = (page.rotate + 0) % 360
interpreter.process_page(page)
device.close()
outfp.close()
with open("./data.html", "r") as file:
html = file.read().replace("\n", "")
file_name = url_to_import.split("/")[-1] + ".md"
text_maker = html2text.HTML2Text()
text_maker.ignore_links = False
text_maker.ignore_images = False
text_maker.ignore_tables = False
text_maker.ignore_emphasis = False
data = text_maker.handle(html)
self.helper.api.external_reference.add_file(
id=external_reference["id"],
file_name=file_name,
data=data,
mime_type="text/markdown",
)
self.delete_files()
except Exception as e:
self.delete_files()
self.helper.log_error(e)
else:
try:
file_name = url_to_import.split("/")[-1] + ".md"
text_maker = html2text.HTML2Text()
text_maker.body_width = 0
text_maker.ignore_links = False
text_maker.ignore_images = False
text_maker.ignore_tables = False
text_maker.ignore_emphasis = False
text_maker.skip_internal_links = False
text_maker.inline_links = True
text_maker.protect_links = True
text_maker.mark_code = True
req = urllib.request.Request(url_to_import, headers=self.headers)
response = urllib.request.urlopen(
req, context=ssl.create_default_context(cafile=certifi.where())
)
html = response.read().decode("utf-8")
data = text_maker.handle(html)
data = data.replace("](//", "](https://")
self.helper.api.external_reference.add_file(
id=external_reference["id"],
file_name=file_name,
data=data,
mime_type="text/markdown",
)
except Exception as e:
self.helper.log_error(e)
return "Import process is finished."
def _process_message(self, data):
entity_id = data["entity_id"]
external_reference = self.helper.api.external_reference.read(id=entity_id)
return self._process_external_reference(external_reference)
# Start the main loop
def start(self):
self.helper.listen(self._process_message)
if __name__ == "__main__":
externalReferenceInstance = ImportExternalReferenceConnector()
externalReferenceInstance.start()
|
the-stack_106_16481
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Naive profiling using timeit. (Used in MonoBeast.)"""
import collections
import timeit
class Timings:
"""Not thread-safe."""
def __init__(self):
self._means = collections.defaultdict(int)
self._vars = collections.defaultdict(int)
self._counts = collections.defaultdict(int)
self.reset()
def reset(self):
self.last_time = timeit.default_timer()
def time(self, name):
"""Save an update for event `name`.
Nerd alarm: We could just store a
collections.defaultdict(list)
and compute means and standard deviations at the end. But thanks to the
clever math in Sutton-Barto
(http://www.incompleteideas.net/book/first/ebook/node19.html) and
https://math.stackexchange.com/a/103025/5051 we can update both the
means and the stds online. O(1) FTW!
"""
now = timeit.default_timer()
x = now - self.last_time
self.last_time = now
n = self._counts[name]
mean = self._means[name] + (x - self._means[name]) / (n + 1)
var = (
n * self._vars[name] + n * (self._means[name] - mean) ** 2 + (x - mean) ** 2
) / (n + 1)
self._means[name] = mean
self._vars[name] = var
self._counts[name] += 1
def means(self):
return self._means
def vars(self):
return self._vars
def stds(self):
return {k: v ** 0.5 for k, v in self._vars.items()}
def summary(self, prefix=""):
means = self.means()
stds = self.stds()
total = sum(means.values())
result = prefix
for k in sorted(means, key=means.get, reverse=True):
result += "\n %s: %.6fms +- %.6fms (%.2f%%) " % (
k,
1000 * means[k],
1000 * stds[k],
100 * means[k] / total,
)
result += "\nTotal: %.6fms" % (1000 * total)
return result
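# Illustrative usage (a hedged sketch, not part of the original module): the
# typical pattern times each phase of a loop between successive calls.
#
#     timings = Timings()
#     for _ in range(num_steps):      # num_steps, load_batch and learn are
#         batch = load_batch()        # hypothetical placeholders for this note
#         timings.time("data")
#         learn(batch)
#         timings.time("learn")
#     print(timings.summary("Per-phase timings:"))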
|
the-stack_106_16483
|
from torch.autograd import Variable
import torch.nn as nn
import torch
import numpy as np
import os
import re
import pickle
import argparse
from rnn import *
parser = argparse.ArgumentParser(description='PyTorch char-rnn')
parser.add_argument('--temperature', type=float, default=0.8)
parser.add_argument('--sample_len', type=int, default=500)
parser.add_argument('--checkpoint', '-c', type=str)
parser.add_argument('--seed', type=str, default='a')
parser.add_argument('--charfile', '-f', type=str)
parser.add_argument('--concatenate', type=int, default=0)
args = parser.parse_args()
with open(args.charfile, 'rb') as f:
chars = pickle.load(f)
chars = sorted(list(set(chars)))
chars_len = len(chars)
char_to_index = {}
index_to_char = {}
for i, c in enumerate(chars):
char_to_index[c] = i
index_to_char[i] = c
random_state = np.random.RandomState(np.random.randint(1,9999))
def uppercase_sentences(match):
return match.group(1) + ' ' + match.group(2).upper()
def index_to_tensor(index):
tensor = torch.zeros(1, 1).long()
tensor[0,0] = index
return Variable(tensor)
def manual_sample(x, temperature):
    x = x.reshape(-1).astype(np.float64)
x /= temperature
x = np.exp(x)
x /= np.sum(x)
x = random_state.multinomial(1, x)
x = np.argmax(x)
return x.astype(np.int64)
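# Note on temperature (assuming the network emits log-probabilities, as the
# exp()/normalize steps above imply): values below 1.0 sharpen the distribution
# toward the most likely character, values above 1.0 flatten it toward uniform
# sampling, and 1.0 reproduces the model's own distribution unchanged.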
def sample(model, prime_str, predict_len, temperature, concatenate):
with torch.no_grad():
hidden = model.create_hidden(1)
prime_tensors = [index_to_tensor(char_to_index[char]) for char in prime_str]
for prime_tensor in prime_tensors[-2:]:
_, hidden = model(prime_tensor, hidden)
inp = prime_tensors[-1]
predicted = prime_str
for p in range(predict_len):
output, hidden = model(inp, hidden)
# Sample from the network as a multinomial distribution
# output_dist = output.data.view(-1).div(temperature).exp()
# top_i = torch.multinomial(output_dist, 1)[0]
# Alternative: use numpy
top_i = manual_sample(output.data.numpy(), temperature)
# Add predicted character to string and use as next input
predicted_char = index_to_char[top_i]
predicted += predicted_char
inp = index_to_tensor(char_to_index[predicted_char])
predicted = predicted.split(' ', 1)[1].capitalize()
predicted = re.sub(r'([.?!]) ([a-z])', uppercase_sentences, predicted)
predicted = re.sub(r'([.?!]\n)([a-z])', uppercase_sentences, predicted)
predicted = re.sub(r'([.?!]\n *\n)([a-z])', uppercase_sentences, predicted)
        if '.' in predicted:
            predicted = predicted[:predicted.rfind('.')+1]
if concatenate == -1:
predicted = re.sub(r'\n', ' ', predicted)
return predicted
if os.path.exists(args.checkpoint):
print('Parameters found at {}... loading'.format(args.checkpoint))
checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
else:
raise ValueError('File not found: {}'.format(args.checkpoint))
hidden_size = checkpoint['model']['encoder.weight'].size()[1]
n_layers = 0
for key in checkpoint['model'].keys():
if 'cells.weight_hh' in key:
n_layers = n_layers + 1
model = RNN(chars_len, hidden_size, chars_len, n_layers, 0.5)
model.load_state_dict(checkpoint['model'])
print(sample(model, args.seed, args.sample_len, args.temperature, args.concatenate))
|
the-stack_106_16484
|
from mykrobe.typing.typer.base import Typer
from mykrobe.stats import log_lik_R_S_coverage
from mykrobe.stats import log_lik_R_S_kmer_count
from mykrobe.typing.typer.base import MIN_LLK
from mykrobe.typing.typer.base import DEFAULT_MINOR_FREQ
from mykrobe.typing.typer.base import DEFAULT_ERROR_RATE
from mykrobe.stats import percent_coverage_from_expected_coverage
from mykrobe.stats import log_lik_probability_of_N_gaps
import logging
logger = logging.getLogger(__name__)
def likelihoods_to_confidence(l):
    if len(l) < 3:
        raise ValueError(
            "Must have at least 3 likelihoods to calculate confidence")
l_sorted = sorted(l, reverse=True)
if l_sorted[2] == l[0]:
        # 0/1 and 1/1 are the most likely - since haploid - compare conf with 0/0
return int(round(l_sorted[0] - l[0]))
else:
# Otherwise, compare with 2 most likely
return int(round(l_sorted[0] - l_sorted[1]))
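# Worked example (illustrative, not from the original module): with likelihoods
# ordered as [hom_ref, het, hom_alt], l = [-10, -3, -1] makes hom_ref the least
# likely value, so the confidence is round(-1 - (-10)) = 9; for l = [-2, -9, -1]
# the two best likelihoods are compared instead, giving round(-1 - (-2)) = 1.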
class VariantTyper(Typer):
def __init__(self, expected_depths, contamination_depths=[],
error_rate=DEFAULT_ERROR_RATE,
minor_freq=DEFAULT_MINOR_FREQ,
ignore_filtered=False,
filters=[],
confidence_threshold=3,
model="kmer_count"):
super(
VariantTyper,
self).__init__(
expected_depths,
contamination_depths,
error_rate,
ignore_filtered=ignore_filtered,
filters=filters,
confidence_threshold=confidence_threshold)
self.method = "MAP"
self.error_rate = error_rate
self.minor_freq = minor_freq
if model == "median_depth":
self.model = DepthCoverageGenotypeModel(
self.expected_depths, self.contamination_depths, self.error_rate, self.minor_freq)
elif model == "kmer_count":
logger.debug("Genotyping using kc model")
self.model = KmerCountGenotypeModel(
self.expected_depths, self.contamination_depths, self.error_rate, self.minor_freq)
self.ignore_filtered = ignore_filtered
self.filters = filters
if len(expected_depths) > 1:
raise NotImplementedError("Mixed samples not handled yet")
def type(self, variant_probe_coverages, variant=None):
"""
Takes a list of VariantProbeCoverages and returns a Call for the Variant.
Note, in the simplest case the list will be of length one. However, we may be typing the
        Variant on multiple backgrounds, leading to multiple VariantProbes for a single Variant.
"""
if not isinstance(variant_probe_coverages, list):
variant_probe_coverages = [variant_probe_coverages]
calls = []
for variant_probe_coverage in variant_probe_coverages:
calls.append(
self._type_variant_probe_coverages(
variant_probe_coverage, variant))
hom_alt_calls = [c for c in calls if sum(c["genotype"]) > 1]
het_calls = [c for c in calls if sum(c["genotype"]) == 1]
if hom_alt_calls:
hom_alt_calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return hom_alt_calls[0]
elif het_calls:
het_calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return het_calls[0]
else:
calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return calls[0]
def _type_variant_probe_coverages(
self, variant_probe_coverage, variant=None):
hom_ref_likelihood = self.model.hom_ref_lik(variant_probe_coverage)
hom_alt_likelihood = self.model.hom_alt_lik(variant_probe_coverage)
if not self.has_contamination():
het_likelihood = self.model.het_lik(variant_probe_coverage)
else:
het_likelihood = MIN_LLK
likelihoods = [hom_ref_likelihood, het_likelihood, hom_alt_likelihood]
confidence = likelihoods_to_confidence(likelihoods)
gt = self.likelihoods_to_genotype(
likelihoods
)
info = {"coverage": variant_probe_coverage.coverage_dict,
"expected_depths": self.expected_depths,
"contamination_depths": self.contamination_depths,
"filter": "PASS",
"conf": confidence}
if gt == "-/-" and not self.ignore_filtered:
if variant_probe_coverage.alternate_percent_coverage > variant_probe_coverage.reference_percent_coverage:
gt = "1/1"
else:
gt = "0/0"
if "MISSING_WT" in self.filters:
info["filter"] = "MISSING_WT"
elif "LOW_PERCENT_COVERAGE" in self.filters and variant_probe_coverage.alternate_percent_coverage < 100 and variant_probe_coverage.reference_percent_coverage < 100:
info["filter"] = "LOW_PERCENT_COVERAGE"
if self.ignore_filtered:
gt = "0/0"
if "LOW_GT_CONF" in self.filters and (confidence < self.confidence_threshold):
info["filter"] = "LOW_GT_CONF"
return {
"variant": variant,
"genotype": [
int(i) for i in gt.split("/")],
"genotype_likelihoods": likelihoods,
"info": info,
"_cls": "Call.VariantCall"}
class GenotypeModel(object):
def __init__(self, expected_depths, contamination_depths, error_rate, minor_freq):
self.expected_depths = expected_depths
self.contamination_depths = contamination_depths
self.error_rate = error_rate
self.minor_freq = minor_freq
def hom_ref_lik(self, variant_probe_coverage):
raise NotImplementedError
def hom_alt_lik(self, variant_probe_coverage):
raise NotImplementedError
def het_lik(self, variant_probe_coverage):
raise NotImplementedError
class KmerCountGenotypeModel(GenotypeModel):
def __init__(self, expected_depths, contamination_depths, error_rate, minor_freq):
super(KmerCountGenotypeModel, self).__init__(
expected_depths, contamination_depths, error_rate, minor_freq)
def hom_ref_lik(self, variant_probe_coverage):
hom_ref_likes = []
        # Likelihood under the expected coverage alone, or under expected + contamination coverage
for expected_depth in self.expected_depths:
hom_ref_likes.append(
log_lik_R_S_kmer_count(
variant_probe_coverage.reference_kmer_count,
variant_probe_coverage.alternate_kmer_count,
expected_depth,
expected_depth *
self.error_rate /
3))
for contamination in self.contamination_depths:
hom_ref_likes.append(
log_lik_R_S_kmer_count(
variant_probe_coverage.reference_kmer_count,
variant_probe_coverage.alternate_kmer_count,
expected_depth + contamination,
(expected_depth + contamination) * self.error_rate / 3))
return max(hom_ref_likes)
def hom_alt_lik(self, variant_probe_coverage):
hom_alt_liks = []
        # Likelihood under the expected coverage alone, or under expected + contamination coverage
for expected_depth in self.expected_depths:
hom_alt_liks.append(
log_lik_R_S_kmer_count(
variant_probe_coverage.alternate_kmer_count,
variant_probe_coverage.reference_kmer_count,
expected_depth,
expected_depth *
self.error_rate /
3))
for contamination in self.contamination_depths:
hom_alt_liks.append(
log_lik_R_S_kmer_count(
variant_probe_coverage.alternate_kmer_count,
variant_probe_coverage.reference_kmer_count,
expected_depth + contamination,
(expected_depth + contamination) * self.error_rate / 3))
return max(hom_alt_liks)
def het_lik(self, variant_probe_coverage):
if (variant_probe_coverage.alternate_kmer_count+variant_probe_coverage.reference_kmer_count) == 0:
return MIN_LLK
elif variant_probe_coverage.alternate_percent_coverage < 100 or variant_probe_coverage.reference_percent_coverage < 100:
return MIN_LLK
else:
het_liks = []
for expected_depth in self.expected_depths:
het_liks.append(
log_lik_R_S_kmer_count(
variant_probe_coverage.alternate_kmer_count,
variant_probe_coverage.reference_kmer_count,
expected_depth/2 +
(expected_depth/2 * self.error_rate/3),
expected_depth/2 + (expected_depth/2 * self.error_rate/3))
)
return max(het_liks)
class DepthCoverageGenotypeModel(GenotypeModel):
def __init__(self, expected_depths, contamination_depths, error_rate, minor_freq):
super(DepthCoverageGenotypeModel, self).__init__(
expected_depths, contamination_depths, error_rate, minor_freq)
def hom_ref_lik(self, variant_probe_coverage):
if variant_probe_coverage.reference_percent_coverage < 100 * \
percent_coverage_from_expected_coverage(max(self.expected_depths)):
return MIN_LLK
else:
hom_ref_likes = []
            # Likelihood under the expected coverage alone, or under expected + contamination coverage
for expected_depth in self.expected_depths:
hom_ref_likes.append(
log_lik_R_S_coverage(
variant_probe_coverage.reference_median_depth,
variant_probe_coverage.alternate_median_depth,
expected_depth,
expected_depth *
self.error_rate /
3))
for contamination in self.contamination_depths:
hom_ref_likes.append(
log_lik_R_S_coverage(
variant_probe_coverage.reference_median_depth,
variant_probe_coverage.alternate_median_depth,
expected_depth + contamination,
(expected_depth + contamination) * self.error_rate / 3))
return max(hom_ref_likes)
def hom_alt_lik(self, variant_probe_coverage):
if variant_probe_coverage.alternate_percent_coverage < 100 * \
percent_coverage_from_expected_coverage(max(self.expected_depths)):
return MIN_LLK
else:
hom_alt_liks = []
            # Likelihood under the expected coverage alone, or under expected + contamination coverage
for expected_depth in self.expected_depths:
hom_alt_liks.append(
log_lik_R_S_coverage(
variant_probe_coverage.alternate_median_depth,
variant_probe_coverage.reference_median_depth,
expected_depth,
expected_depth *
self.error_rate /
3))
for contamination in self.contamination_depths:
hom_alt_liks.append(
log_lik_R_S_coverage(
variant_probe_coverage.alternate_median_depth,
variant_probe_coverage.reference_median_depth,
expected_depth + contamination,
(expected_depth + contamination) * self.error_rate / 3))
return max(hom_alt_liks)
def het_lik(self, variant_probe_coverage):
if variant_probe_coverage.alternate_percent_coverage < 100 or variant_probe_coverage.reference_percent_coverage < 100:
return MIN_LLK
else:
het_liks = []
for expected_depth in self.expected_depths:
het_liks.append(
log_lik_R_S_coverage(
variant_probe_coverage.alternate_median_depth,
variant_probe_coverage.reference_median_depth,
expected_depth * self.minor_freq,
expected_depth * (
1 - self.minor_freq)))
return max(het_liks)
|
the-stack_106_16486
|
"""Clean Wikipedia dumps for use as a training corpus."""
import re
import argparse
import html
import bz2
import logging
from .utensils import log_timer
from multiprocessing import cpu_count
logging.basicConfig(format='[{levelname}] {message}', style='{', level=logging.INFO)
cores = int(cpu_count() / 2)
@log_timer
def strip_file(fname):
"""Strip xml and other tags from Wikipedia dump.
Writes stripped Wikipedia text directly to text file.
:param fname: Wikipedia dump file, in xml or bzip2 format.
"""
logging.info(f'stripping {fname}')
if fname.endswith('.bz2'):
with bz2.open(fname, 'rt', encoding='utf-8') as in_file, open(fname.replace('.xml.bz2', '.clean.txt'), 'w', encoding='utf-8') as out_file:
out_file.write(_strip_xml(in_file.read()))
if fname.endswith('.xml'):
with open(fname, 'r', encoding='utf-8') as in_file, open(fname.replace('.xml', '.clean.txt'), 'w', encoding='utf-8') as out_file:
out_file.write(_strip_xml(in_file.read()))
if fname.endswith('.txt'):
with open(fname, 'r', encoding='utf-8') as in_file, open(fname.replace('.txt', '.clean.txt'), 'w', encoding='utf-8') as out_file:
out_file.write(_strip_xml(in_file.read()))
logging.info(f'completed stripping {fname}')
@log_timer
def big_strip_file(fname, lines_per_chunk=1e6):
"""Strip xml and other tags from a Wikipedia dump that doesn't fit into RAM.
    Processes the Wikipedia dump in chunks and then concatenates the chunks into a single text file.
:param fname: Wikipedia dump file, in xml or bzip2 format.
:param lines_per_chunk: number of lines in each chunk (default is 1e6, one million lines)
"""
logging.info(f'stripping {fname}')
if fname.endswith('.bz2'):
with bz2.open(fname, 'rt', encoding='utf-8') as in_file, open(fname.replace('.xml.bz2', '.clean.txt'), 'w', encoding='utf-8') as out_file:
i = 0
j = 0
lines = []
            for line in in_file:
                lines.append(line)
                i += 1
                if i > ((j + 1) * int(lines_per_chunk)):
out_file.write(_strip_xml(''.join(lines)))
lines = []
j += 1
out_file.write(_strip_xml(''.join(lines)))
if fname.endswith('.xml'):
with open(fname, 'r', encoding='utf-8') as in_file, open(fname.replace('.xml', '.clean.txt'), 'w', encoding='utf-8') as out_file:
out_file.write(_strip_xml(in_file.read()))
if fname.endswith('.txt'):
with open(fname, 'r', encoding='utf-8') as in_file, open(fname.replace('.txt', '.clean.txt'), 'w', encoding='utf-8') as out_file:
out_file.write(_strip_xml(in_file.read()))
logging.info(f'completed stripping {fname}')
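# Illustrative call (a hedged sketch; the dump filename below is a placeholder):
#     big_strip_file('enwiki-latest-pages-articles.xml.bz2', lines_per_chunk=5e5)
# streams the bzip2 dump in roughly 500k-line chunks and writes the stripped
# text to 'enwiki-latest-pages-articles.clean.txt'.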
regeces = [
(r'(?s)<ref.*?</ref>', ''), # strip reference links
(r'(?s)<references.*?</references>', ''), # strip references
(r'(?s)<table.*?</table>', ''), # strip tables
(r'(?s)<gallery.*?</gallery>', ''), # strip galleries
(r'(?s)<kml.*?</kml>', ''), # strip KML tags
(r'<.*?>', ''), # strip other xml tags
(r'http.*?(?:[\s\n\]]|$)', ''), # strip external http(s) links
(r'\[\[[^\]]*?:.*\|(.*?)\]\]', '\\1'), # strip links to files, etc. but keep labels
(r'\[\[[^\]]*?:(.*?)\]\]', ''), # strip category links
(r'\[\[[^\]]*?\|(.*?)\]\]', '\\1'), # convert labeled links to just labels
(r'(?m)^[\s]*[!?*;:=+\-|#_].*?$', ''), # strip lines that do not start with alphanumerics, quotes, or brackets
(r'(?m)^.*?\(UTC\).*?$', ''), # strip lines containing a time stamp
(r'\s\(.*?\)', ''), # remove everything in parentheses
(r'([^\s.!?:;]{2})[.!?:;]+?[\s\n]|$', '\\1\n'), # break sentences at periods
(r"[-–—/']", ' '), # replace hyphens, apostrophes and slashes with spaces
(r'\s*\n\s*', '\n'), # strip empty lines and lines containing whitespace
(r'\s{2,}', ' '), # strip excessive spaces
]
patterns = [(re.compile(regec[0], re.IGNORECASE), regec[1]) for regec in regeces]
def _strip_xml(txts):
"""Strip xml and other tags from Wikipedia text.
:param txts: Wikipedia dump text containing multiple articles
:return: stripped Wikipedia text
"""
txts = html.unescape(html.unescape(txts)) # double unescape because Wikipedia dumps are a mess
txts = txts.split('\n')
for i in range(len(txts)):
for pattern in patterns:
txts[i] = pattern[0].sub(pattern[1], txts[i])
txts = [''.join([letter for letter in txt if (letter.isalnum() or letter.isspace())]) for txt in txts if txt != '']
return '\n'.join(txts)
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='strip text files of xml and other tags')
argparser.add_argument('fname', help='name of file')
argparser.add_argument('--big', action='store_true', help='use special method for files that do not fit in RAM')
args = argparser.parse_args()
if args.big:
big_strip_file(fname=args.fname)
else:
strip_file(fname=args.fname)
|
the-stack_106_16488
|
from toolz import (
pipe,
)
from eth_utils import (
to_bytes,
to_int,
)
from eth_account._utils.transactions import (
ChainAwareUnsignedTransaction,
UnsignedTransaction,
encode_transaction,
serializable_unsigned_transaction_from_dict,
strip_signature,
)
CHAIN_ID_OFFSET = 35
V_OFFSET = 27
# signature versions
PERSONAL_SIGN_VERSION = b'E' # Hex value 0x45
INTENDED_VALIDATOR_SIGN_VERSION = b'\x00' # Hex value 0x00
STRUCTURED_DATA_SIGN_VERSION = b'\x01' # Hex value 0x01
def sign_transaction_dict(eth_key, transaction_dict):
# generate RLP-serializable transaction, with defaults filled
unsigned_transaction = serializable_unsigned_transaction_from_dict(transaction_dict)
transaction_hash = unsigned_transaction.hash()
# detect chain
if isinstance(unsigned_transaction, UnsignedTransaction):
chain_id = None
else:
chain_id = unsigned_transaction.v
# sign with private key
(v, r, s) = sign_transaction_hash(eth_key, transaction_hash, chain_id)
# serialize transaction with rlp
encoded_transaction = encode_transaction(unsigned_transaction, vrs=(v, r, s))
return (v, r, s, encoded_transaction)
def hash_of_signed_transaction(txn_obj):
"""
Regenerate the hash of the signed transaction object.
1. Infer the chain ID from the signature
2. Strip out signature from transaction
3. Annotate the transaction with that ID, if available
4. Take the hash of the serialized, unsigned, chain-aware transaction
Chain ID inference and annotation is according to EIP-155
See details at https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md
:return: the hash of the provided transaction, to be signed
"""
(chain_id, _v) = extract_chain_id(txn_obj.v)
unsigned_parts = strip_signature(txn_obj)
if chain_id is None:
signable_transaction = UnsignedTransaction(*unsigned_parts)
else:
extended_transaction = unsigned_parts + [chain_id, 0, 0]
signable_transaction = ChainAwareUnsignedTransaction(*extended_transaction)
return signable_transaction.hash()
def extract_chain_id(raw_v):
"""
Extracts chain ID, according to EIP-155
@return (chain_id, v)
"""
above_id_offset = raw_v - CHAIN_ID_OFFSET
if above_id_offset < 0:
if raw_v in {0, 1}:
return (None, raw_v + V_OFFSET)
elif raw_v in {27, 28}:
return (None, raw_v)
else:
raise ValueError("v %r is invalid, must be one of: 0, 1, 27, 28, 35+")
else:
(chain_id, v_bit) = divmod(above_id_offset, 2)
return (chain_id, v_bit + V_OFFSET)
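# Worked example (illustrative, not part of the original module): per EIP-155 a
# mainnet transaction (chain id 1) encodes v as 35 + 2 * 1 + {0, 1}, so
# extract_chain_id(37) == (1, 27) and extract_chain_id(38) == (1, 28), while a
# pre-EIP-155 signature with v of 27 or 28 yields (None, 27) or (None, 28).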
def to_standard_signature_bytes(ethereum_signature_bytes):
rs = ethereum_signature_bytes[:-1]
v = to_int(ethereum_signature_bytes[-1])
standard_v = to_standard_v(v)
return rs + to_bytes(standard_v)
def to_standard_v(enhanced_v):
(_chain, chain_naive_v) = extract_chain_id(enhanced_v)
v_standard = chain_naive_v - V_OFFSET
assert v_standard in {0, 1}
return v_standard
def to_eth_v(v_raw, chain_id=None):
if chain_id is None:
v = v_raw + V_OFFSET
else:
v = v_raw + CHAIN_ID_OFFSET + 2 * chain_id
return v
def sign_transaction_hash(account, transaction_hash, chain_id):
signature = account.sign_msg_hash(transaction_hash)
(v_raw, r, s) = signature.vrs
v = to_eth_v(v_raw, chain_id)
return (v, r, s)
def _pad_to_eth_word(bytes_val):
return bytes_val.rjust(32, b'\0')
def to_bytes32(val):
return pipe(
val,
to_bytes,
_pad_to_eth_word,
)
def sign_message_hash(key, msg_hash):
signature = key.sign_msg_hash(msg_hash)
(v_raw, r, s) = signature.vrs
v = to_eth_v(v_raw)
eth_signature_bytes = to_bytes32(r) + to_bytes32(s) + to_bytes(v)
return (v, r, s, eth_signature_bytes)
|
the-stack_106_16489
|
"""
An example test script that uses PyTorch Lightning.
@Author: Francesco Picetti
"""
from argparse import ArgumentParser
import os
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
import src
from train_cnn_lightning import CNN
try:
import pytorch_lightning as pl
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install Pytorch Lightning with: `pip install pytorch_lightning`")
class CNN_with_test(CNN):
def __init__(self, *args, **kwargs):
super(CNN_with_test, self).__init__(*args, **kwargs)
        # add here the test routine (a cleaner way is to define the whole model in
        # another file, to be loaded in both the training and testing scripts)
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self.forward(x)
test_loss = self.loss_fn(y_hat, y)
self.log('test_loss', test_loss)
# here you can compute (and log) as many metrics as you want
return test_loss
def main():
parser = ArgumentParser(description="Test a CIFAR classifier based on PyTorch Lightning")
parser.add_argument("--runpath", type=str, required=False,
default="./data/trained_models/lightning",
help="Results directory to be loaded")
parser.add_argument("--num_gpus", type=int, required=False, default=1,
help="Number of GPUs to use")
args = parser.parse_args()
# load args from runpath, to check the training parameters
trainargs = src.read_args(filename=os.path.join(args.runpath, "args.txt"))
# Transform to tensor and normalize to [0, 1]
trans = transforms.Compose([
transforms.ToTensor(),
])
# Load test set, initialize Dataloaders
testset = CIFAR10(root='./data', train=False, download=True, transform=trans)
dataloader = DataLoader(testset, batch_size=1, shuffle=False, num_workers=2)
# initialize a trainer
trainer = pl.Trainer(gpus=args.num_gpus, # how many GPUs to use...
auto_select_gpus=True if args.num_gpus != 0 else False, # ... only if they are available
)
# test the model
trainer.test(model=CNN_with_test(),
dataloaders=dataloader,
ckpt_path=os.path.join(args.runpath, "best_model.ckpt"))
# you can access the loss and other metrics in the trainer attributes
print("Test Loss = %.2e" % trainer.callback_metrics["test_loss"])
print("Done!")
if __name__ == '__main__':
main()
|
the-stack_106_16494
|
#!/usr/bin/env python2.7
# coding=utf-8
"""
Sopel - An IRC Bot
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright © 2012-2014, Elad Alfassa <[email protected]>
Licensed under the Eiffel Forum License 2.
https://sopel.chat
"""
from __future__ import unicode_literals, absolute_import, print_function, division
import argparse
import logging
import os
import platform
import signal
import sys
import time
from sopel import bot, config, logger, tools, __version__
from . import utils
# This is in case someone somehow manages to install Sopel on an old version
# of pip (<9.0.0), which doesn't know about `python_requires`, or tries to run
# from source on an unsupported version of Python.
if sys.version_info < (2, 7) or (
sys.version_info.major >= 3 and sys.version_info < (3, 3)
):
tools.stderr('Error: Sopel requires Python 2.7+ or 3.3+.')
sys.exit(1)
if sys.version_info.major == 2:
now = time.time()
state = 'has reached end of life'
if now >= 1588291200: # 2020-05-01 00:00:00 UTC
state += ' and will receive no further updates'
tools.stderr('Warning: Python 2.x %s. Sopel 8.0 will drop support for it.' % state)
LOGGER = logging.getLogger(__name__)
ERR_CODE = 1
"""Error code: program exited with an error"""
ERR_CODE_NO_RESTART = 2
"""Error code: program exited with an error and should not be restarted
This error code is used to prevent systemd from restarting the bot when it
encounters such an error case.
"""
def run(settings, pid_file, daemon=False):
delay = 20
# Acts as a welcome message, showing the program and platform version at start
print_version()
if not settings.core.ca_certs:
tools.stderr(
'Could not open CA certificates file. SSL will not work properly!')
def signal_handler(sig, frame):
if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:
LOGGER.warning('Got quit signal, shutting down.')
p.quit('Closing')
elif sig == signal.SIGUSR2 or sig == signal.SIGILL:
LOGGER.warning('Got restart signal, shutting down and restarting.')
p.restart('Restarting')
# Define empty variable `p` for bot
p = None
while True:
if p and p.hasquit: # Check if `hasquit` was set for bot during disconnected phase
break
try:
p = bot.Sopel(settings, daemon=daemon)
if hasattr(signal, 'SIGUSR1'):
signal.signal(signal.SIGUSR1, signal_handler)
if hasattr(signal, 'SIGTERM'):
signal.signal(signal.SIGTERM, signal_handler)
if hasattr(signal, 'SIGINT'):
signal.signal(signal.SIGINT, signal_handler)
if hasattr(signal, 'SIGUSR2'):
signal.signal(signal.SIGUSR2, signal_handler)
if hasattr(signal, 'SIGILL'):
signal.signal(signal.SIGILL, signal_handler)
p.setup()
except KeyboardInterrupt:
break
except Exception:
# In that case, there is nothing we can do.
# If the bot can't setup itself, then it won't run.
# This is a critical case scenario, where the user should have
# direct access to the exception traceback right in the console.
# Besides, we can't know if logging has been set up or not, so
# we can't rely on that here.
tools.stderr('Unexpected error in bot setup')
raise
try:
p.run(settings.core.host, int(settings.core.port))
except KeyboardInterrupt:
break
except Exception:
err_log = logging.getLogger('sopel.exceptions')
err_log.exception('Critical exception in core')
err_log.error('----------------------------------------')
# TODO: This should be handled by command_start
# All we should need here is a return value, but replacing the
# os._exit() call below (at the end) broke ^C.
# This one is much harder to test, so until that one's sorted it
# isn't worth the risk of trying to remove this one.
os.unlink(pid_file)
os._exit(1)
if not isinstance(delay, int):
break
if p.wantsrestart:
return -1
if p.hasquit:
break
LOGGER.warning('Disconnected. Reconnecting in %s seconds...', delay)
time.sleep(delay)
# TODO: This should be handled by command_start
# All we should need here is a return value, but making this
# a return makes Sopel hang on ^C after it says "Closed!"
os.unlink(pid_file)
os._exit(0)
def add_legacy_options(parser):
# TL;DR: option -d/--fork is not deprecated.
# When the legacy action is replaced in Sopel 8, 'start' will become the
# new default action, with its arguments.
# The option -d/--fork is used by both actions (start and legacy),
# and it has the same meaning and behavior, therefore it is not deprecated.
parser.add_argument("-d", '--fork', action="store_true",
dest="daemonize",
help="Daemonize Sopel.")
parser.add_argument("-q", '--quit', action="store_true", dest="quit",
help=(
"Gracefully quit Sopel "
"(deprecated, and will be removed in Sopel 8; "
"use ``sopel stop`` instead)"))
parser.add_argument("-k", '--kill', action="store_true", dest="kill",
help=(
"Kill Sopel "
"(deprecated, and will be removed in Sopel 8; "
"use ``sopel stop --kill`` instead)"))
parser.add_argument("-r", '--restart', action="store_true", dest="restart",
help=(
"Restart Sopel "
"(deprecated, and will be removed in Sopel 8; "
"use `sopel restart` instead)"))
parser.add_argument("-l", '--list', action="store_true",
dest="list_configs",
help=(
"List all config files found"
"(deprecated, and will be removed in Sopel 8; "
"use ``sopel-config list`` instead)"))
parser.add_argument('--quiet', action="store_true", dest="quiet",
help="Suppress all output")
parser.add_argument('-w', '--configure-all', action='store_true',
dest='wizard',
help=(
"Run the configuration wizard "
"(deprecated, and will be removed in Sopel 8; "
"use `sopel configure` instead)"))
parser.add_argument('--configure-modules', action='store_true',
dest='mod_wizard',
help=(
"Run the configuration wizard, but only for the "
"plugin configuration options "
"(deprecated, and will be removed in Sopel 8; "
"use ``sopel configure --plugins`` instead)"))
parser.add_argument('-v', action="store_true",
dest='version_legacy',
help=(
"Show version number and exit "
"(deprecated, and will be removed in Sopel 8; "
"use ``-V/--version`` instead)"))
parser.add_argument('-V', '--version', action='store_true',
dest='version',
help='Show version number and exit')
def build_parser():
"""Build an ``argparse.ArgumentParser`` for the bot"""
parser = argparse.ArgumentParser(description='Sopel IRC Bot',
usage='%(prog)s [options]')
add_legacy_options(parser)
utils.add_common_arguments(parser)
subparsers = parser.add_subparsers(
title='subcommands',
description='List of Sopel\'s subcommands',
dest='action',
metavar='{start,configure,stop,restart}')
# manage `legacy` subcommand
parser_legacy = subparsers.add_parser('legacy')
add_legacy_options(parser_legacy)
utils.add_common_arguments(parser_legacy)
# manage `start` subcommand
parser_start = subparsers.add_parser(
'start',
description='Start a Sopel instance. '
'This command requires an existing configuration file '
'that can be generated with ``sopel configure``.',
help='Start a Sopel instance')
parser_start.add_argument(
'-d', '--fork',
dest='daemonize',
action='store_true',
default=False,
help='Run Sopel as a daemon (fork). This bot will safely run in the '
'background. The instance will be named after the name of the '
'configuration file used to run it. '
'To stop it, use ``sopel stop`` (with the same configuration).')
parser_start.add_argument(
'--quiet',
action="store_true",
dest="quiet",
help="Suppress all output")
utils.add_common_arguments(parser_start)
# manage `configure` subcommand
parser_configure = subparsers.add_parser(
'configure',
description='Run the configuration wizard. It can be used to create '
'a new configuration file or to update an existing one.',
help='Sopel\'s Wizard tool')
parser_configure.add_argument(
'--plugins',
action='store_true',
default=False,
dest='plugins',
help='Check for Sopel plugins that require configuration, and run '
'their configuration wizards.')
utils.add_common_arguments(parser_configure)
# manage `stop` subcommand
parser_stop = subparsers.add_parser(
'stop',
description='Stop a running Sopel instance. '
'This command determines the instance to quit by the name '
'of the configuration file used ("default", or the one '
'from the ``-c``/``--config`` option). '
'This command should be used when the bot is running in '
'the background from ``sopel start -d``, and should not '
'be used when Sopel is managed by a process manager '
'(like systemd or supervisor).',
help='Stop a running Sopel instance')
parser_stop.add_argument(
'-k', '--kill',
action='store_true',
default=False,
help='Kill Sopel without a graceful quit')
parser_stop.add_argument(
'--quiet',
action="store_true",
dest="quiet",
help="Suppress all output")
utils.add_common_arguments(parser_stop)
# manage `restart` subcommand
parser_restart = subparsers.add_parser(
'restart',
description='Restart a running Sopel instance',
help='Restart a running Sopel instance')
parser_restart.add_argument(
'--quiet',
action="store_true",
dest="quiet",
help="Suppress all output")
utils.add_common_arguments(parser_restart)
return parser
def check_not_root():
"""Check if root is running the bot.
It raises a ``RuntimeError`` if the user has root privileges on Linux or
if it is the ``Administrator`` account on Windows.
"""
opersystem = platform.system()
if opersystem in ["Linux", "Darwin"]:
# Linux/Mac
if os.getuid() == 0 or os.geteuid() == 0:
raise RuntimeError('Error: Do not run Sopel with root privileges.')
elif opersystem in ["Windows"]:
# Windows
if os.environ.get("USERNAME") == "Administrator":
raise RuntimeError('Error: Do not run Sopel as Administrator.')
else:
tools.stderr(
"Warning: %s is an uncommon operating system platform. "
"Sopel should still work, but please contact Sopel's developers "
"if you experience issues."
% opersystem)
def print_version():
"""Print Python version and Sopel version on stdout."""
py_ver = '%s.%s.%s' % (sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro)
print('Sopel %s (running on Python %s)' % (__version__, py_ver))
print('https://sopel.chat/')
def print_config(configdir):
"""Print list of available configurations from config directory."""
configs = utils.enumerate_configs(configdir)
print('Config files in %s:' % configdir)
configfile = None
for configfile in configs:
print('\t%s' % configfile)
if not configfile:
print('\tNone found')
print('-------------------------')
def get_configuration(options):
"""Get or create a configuration object from ``options``.
:param options: argument parser's options
:type options: ``argparse.Namespace``
:return: a configuration object
:rtype: :class:`sopel.config.Config`
This may raise a :exc:`sopel.config.ConfigurationError` if the
configuration file is invalid.
.. seealso::
The configuration file is loaded by
:func:`~sopel.cli.run.utils.load_settings` or created using the
configuration wizard.
"""
try:
settings = utils.load_settings(options)
except config.ConfigurationNotFound as error:
print(
"Welcome to Sopel!\n"
"I can't seem to find the configuration file, "
"so let's generate it!\n")
settings = utils.wizard(error.filename)
settings._is_daemonized = options.daemonize
return settings
def get_pid_filename(options, pid_dir):
"""Get the pid file name in ``pid_dir`` from the given ``options``.
:param options: command line options
:param str pid_dir: path to the pid directory
:return: absolute filename of the pid file
By default, it's ``sopel.pid``, but if a configuration filename is given
in the ``options``, its basename is used to generate the filename, as:
``sopel-{basename}.pid`` instead.
"""
name = 'sopel.pid'
if options.config and options.config != 'default':
basename = os.path.basename(options.config)
if basename.endswith('.cfg'):
basename = basename[:-4]
name = 'sopel-%s.pid' % basename
return os.path.abspath(os.path.join(pid_dir, name))
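# Illustrative mapping (hedged note, not part of the original module): with the
# default configuration the file is <pid_dir>/sopel.pid, whereas running with a
# hypothetical ``-c freenode.cfg`` option yields <pid_dir>/sopel-freenode.pid.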
def get_running_pid(filename):
"""Retrieve the PID number from the given ``filename``.
:param str filename: path to file to read the PID from
:return: the PID number of a Sopel instance if running, ``None`` otherwise
:rtype: integer
This function tries to retrieve a PID number from the given ``filename``,
as an integer, and returns ``None`` if the file is not found or if the
content is not an integer.
"""
if not os.path.isfile(filename):
return
with open(filename, 'r') as pid_file:
try:
return int(pid_file.read())
except ValueError:
pass
def command_start(opts):
"""Start a Sopel instance"""
# Step One: Get the configuration file and prepare to run
try:
config_module = get_configuration(opts)
except config.ConfigurationError as e:
tools.stderr(e)
return ERR_CODE_NO_RESTART
if config_module.core.not_configured:
tools.stderr('Bot is not configured, can\'t start')
return ERR_CODE_NO_RESTART
# Step Two: Handle process-lifecycle options and manage the PID file
pid_dir = config_module.core.pid_dir
pid_file_path = get_pid_filename(opts, pid_dir)
pid = get_running_pid(pid_file_path)
if pid is not None and tools.check_pid(pid):
tools.stderr('There\'s already a Sopel instance running '
'with this config file.')
tools.stderr('Try using either the `sopel stop` '
'or the `sopel restart` command.')
return ERR_CODE
if opts.daemonize:
child_pid = os.fork()
if child_pid != 0:
return
with open(pid_file_path, 'w') as pid_file:
pid_file.write(str(os.getpid()))
# Step Three: Run Sopel
ret = run(config_module, pid_file_path)
# Step Four: Shutdown Clean-Up
os.unlink(pid_file_path)
if ret == -1:
# Restart
os.execv(sys.executable, ['python'] + sys.argv)
else:
# Quit
return ret
def command_configure(opts):
"""Sopel Configuration Wizard"""
configpath = utils.find_config(opts.configdir, opts.config)
if opts.plugins:
utils.plugins_wizard(configpath)
else:
utils.wizard(configpath)
def command_stop(opts):
"""Stop a running Sopel instance"""
# Get Configuration
try:
settings = utils.load_settings(opts)
except config.ConfigurationNotFound as error:
tools.stderr('Configuration "%s" not found' % error.filename)
return ERR_CODE
if settings.core.not_configured:
tools.stderr('Sopel is not configured, can\'t stop')
return ERR_CODE
# Configure logging
logger.setup_logging(settings)
# Get Sopel's PID
filename = get_pid_filename(opts, settings.core.pid_dir)
pid = get_running_pid(filename)
if pid is None or not tools.check_pid(pid):
tools.stderr('Sopel is not running!')
return ERR_CODE
# Stop Sopel
if opts.kill:
tools.stderr('Killing the Sopel')
os.kill(pid, signal.SIGKILL)
return
tools.stderr('Signaling Sopel to stop gracefully')
if hasattr(signal, 'SIGUSR1'):
os.kill(pid, signal.SIGUSR1)
else:
# Windows will not generate SIGTERM itself
# https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
os.kill(pid, signal.SIGTERM)
def command_restart(opts):
"""Restart a running Sopel instance"""
# Get Configuration
try:
settings = utils.load_settings(opts)
except config.ConfigurationNotFound as error:
tools.stderr('Configuration "%s" not found' % error.filename)
return ERR_CODE
if settings.core.not_configured:
tools.stderr('Sopel is not configured, can\'t stop')
return ERR_CODE
# Configure logging
logger.setup_logging(settings)
# Get Sopel's PID
filename = get_pid_filename(opts, settings.core.pid_dir)
pid = get_running_pid(filename)
if pid is None or not tools.check_pid(pid):
tools.stderr('Sopel is not running!')
return ERR_CODE
tools.stderr('Asking Sopel to restart')
if hasattr(signal, 'SIGUSR2'):
os.kill(pid, signal.SIGUSR2)
else:
# Windows will not generate SIGILL itself
# https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
os.kill(pid, signal.SIGILL)
def command_legacy(opts):
"""Legacy Sopel run script
The ``legacy`` command manages the old-style ``sopel`` command line tool.
Most of its features are replaced by the following commands:
* ``sopel start`` replaces the default behavior (run the bot)
* ``sopel stop`` replaces the ``--quit/--kill`` options
* ``sopel restart`` replaces the ``--restart`` option
* ``sopel configure`` replaces the
``-w/--configure-all/--configure-modules`` options
The ``-v`` option for "version" is deprecated, ``-V/--version`` should be
used instead.
.. seealso::
The github issue `#1471`__ tracks various changes requested for future
versions of Sopel, some of them related to this legacy command.
.. __: https://github.com/sopel-irc/sopel/issues/1471
"""
# Step One: Handle "No config needed" options
if opts.version:
print_version()
return
elif opts.version_legacy:
tools.stderr(
'WARNING: option -v is deprecated; '
'use `sopel -V/--version` instead')
print_version()
return
configpath = utils.find_config(opts.configdir, opts.config)
if opts.wizard:
tools.stderr(
'WARNING: option -w/--configure-all is deprecated; '
'use `sopel configure` instead')
utils.wizard(configpath)
return
if opts.mod_wizard:
tools.stderr(
'WARNING: option --configure-modules is deprecated; '
'use `sopel configure --plugins` instead')
utils.plugins_wizard(configpath)
return
if opts.list_configs:
tools.stderr(
'WARNING: option --list is deprecated; '
'use `sopel-config list` instead')
print_config(opts.configdir)
return
# Step Two: Get the configuration file and prepare to run
try:
config_module = get_configuration(opts)
except config.ConfigurationError as e:
tools.stderr(e)
return ERR_CODE_NO_RESTART
if config_module.core.not_configured:
tools.stderr('Bot is not configured, can\'t start')
return ERR_CODE_NO_RESTART
# Step Three: Handle process-lifecycle options and manage the PID file
pid_dir = config_module.core.pid_dir
pid_file_path = get_pid_filename(opts, pid_dir)
old_pid = get_running_pid(pid_file_path)
if old_pid is not None and tools.check_pid(old_pid):
if not opts.quit and not opts.kill and not opts.restart:
tools.stderr(
'There\'s already a Sopel instance running with this config file')
tools.stderr(
'Try using either the `sopel stop` command or the `sopel restart` command')
return ERR_CODE
elif opts.kill:
tools.stderr(
'WARNING: option -k/--kill is deprecated; '
'use `sopel stop --kill` instead')
tools.stderr('Killing the Sopel')
os.kill(old_pid, signal.SIGKILL)
return
elif opts.quit:
tools.stderr(
'WARNING: options -q/--quit is deprecated; '
'use `sopel stop` instead')
tools.stderr('Signaling Sopel to stop gracefully')
if hasattr(signal, 'SIGUSR1'):
os.kill(old_pid, signal.SIGUSR1)
else:
# Windows will not generate SIGTERM itself
# https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
os.kill(old_pid, signal.SIGTERM)
return
elif opts.restart:
tools.stderr(
'WARNING: options --restart is deprecated; '
'use `sopel restart` instead')
tools.stderr('Asking Sopel to restart')
if hasattr(signal, 'SIGUSR2'):
os.kill(old_pid, signal.SIGUSR2)
else:
# Windows will not generate SIGILL itself
# https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
os.kill(old_pid, signal.SIGILL)
return
elif opts.kill or opts.quit or opts.restart:
tools.stderr('Sopel is not running!')
return ERR_CODE
if opts.daemonize:
child_pid = os.fork()
if child_pid != 0:
return
with open(pid_file_path, 'w') as pid_file:
pid_file.write(str(os.getpid()))
# Step Four: Initialize and run Sopel
ret = run(config_module, pid_file_path)
os.unlink(pid_file_path)
if ret == -1:
os.execv(sys.executable, ['python'] + sys.argv)
else:
return ret
def main(argv=None):
"""Sopel run script entry point"""
try:
# Step One: Parse The Command Line
parser = build_parser()
# make sure to have an action first (`legacy` by default)
# TODO: `start` should be the default in Sopel 8
argv = argv or sys.argv[1:]
if not argv:
argv = ['legacy']
elif argv[0].startswith('-') and argv[0] not in ['-h', '--help']:
argv = ['legacy'] + argv
opts = parser.parse_args(argv)
# Step Two: "Do not run as root" checks
try:
check_not_root()
except RuntimeError as err:
tools.stderr('%s' % err)
return ERR_CODE
# Step Three: Handle command
action = getattr(opts, 'action', 'legacy')
command = {
'legacy': command_legacy,
'start': command_start,
'configure': command_configure,
'stop': command_stop,
'restart': command_restart,
}.get(action)
return command(opts)
except KeyboardInterrupt:
print("\n\nInterrupted")
return ERR_CODE
if __name__ == '__main__':
sys.exit(main())
|
the-stack_106_16495
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet balance RPC methods."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
RANDOM_COINBASE_ADDRESS = 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ'
def create_transactions(node, address, amt, fees):
# Create and sign raw transactions from node to address for amt.
# Creates a transaction for each fee and returns an array
# of the raw transactions.
utxos = node.listunspent(0)
# Create transactions
inputs = []
ins_total = 0
for utxo in utxos:
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
ins_total += utxo['amount']
if ins_total > amt:
break
txs = []
for fee in fees:
outputs = {address: amt, node.getrawchangeaddress(): ins_total - amt - fee}
raw_tx = node.createrawtransaction(inputs, outputs, 0, True)
raw_tx = node.signrawtransactionwithwallet(raw_tx)
txs.append(raw_tx)
return txs
class WalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Check that nodes don't own any UTXOs
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
self.log.info("Mining one block for each node")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(1)
self.nodes[1].generatetoaddress(100, RANDOM_COINBASE_ADDRESS)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
self.log.info("Test getbalance with different arguments")
assert_equal(self.nodes[0].getbalance("*"), 50)
assert_equal(self.nodes[0].getbalance("*", 1), 50)
assert_equal(self.nodes[0].getbalance("*", 1, True), 50)
assert_equal(self.nodes[0].getbalance(minconf=1), 50)
# Send 40 CRM from 0 to 1 and 60 CRM from 1 to 0.
txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')])
self.nodes[0].sendrawtransaction(txs[0]['hex'])
self.nodes[1].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[0].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
# First argument of getbalance must be set to "*"
assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")
self.log.info("Test getbalance and getunconfirmedbalance with unconfirmed inputs")
# getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
assert_equal(self.nodes[0].getbalance(), Decimal('9.99')) # change from node 0's send
assert_equal(self.nodes[1].getbalance(), Decimal('29.99')) # change from node 1's send
# Same with minconf=0
assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('29.99'))
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
# getunconfirmedbalance
assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # output of node 1's spend
assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('0')) # Doesn't include output of node 0's send since it was spent
# Node 1 bumps the transaction fee and resends
self.nodes[1].sendrawtransaction(txs[1]['hex'])
self.sync_all()
self.log.info("Test getbalance and getunconfirmedbalance with conflicted unconfirmed inputs")
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60')) # output of node 1's send
assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60'))
assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('0')) # Doesn't include output of node 0's send since it was spent
assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('0'))
self.nodes[1].generatetoaddress(1, RANDOM_COINBASE_ADDRESS)
self.sync_all()
# balances are correct after the transactions are confirmed
assert_equal(self.nodes[0].getbalance(), Decimal('69.99')) # node 1's send plus change from node 0's send
assert_equal(self.nodes[1].getbalance(), Decimal('29.98')) # change from node 0's send
# Send total balance away from node 1
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[1].generatetoaddress(2, RANDOM_COINBASE_ADDRESS)
self.sync_all()
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
# getbalance with minconf=3 should still show the old balance
assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))
# getbalance with minconf=2 will show the new balance.
assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))
if __name__ == '__main__':
WalletTest().main()
|
the-stack_106_16497
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
'''
TZlibTransport provides a compressed transport and transport factory
class, using the python standard library zlib module to implement
data compression.
'''
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import object
import zlib
from io import StringIO
from .TTransport import TTransportBase, CReadableTransport
class TZlibTransportFactory(object):
'''
Factory transport that builds zlib compressed transports.
This factory caches the last single client/transport that it was passed
and returns the same TZlibTransport object that was created.
This caching means the TServer class will get the _same_ transport
object for both input and output transports from this factory.
(For non-threaded scenarios only, since the cache only holds one object)
The purpose of this caching is to allocate only one TZlibTransport where
only one is really needed (since it must have separate read/write buffers),
and makes the statistics from getCompSavings() and getCompRatio()
easier to understand.
'''
# class scoped cache of last transport given and zlibtransport returned
_last_trans = None
_last_z = None
def getTransport(self, trans, compresslevel=9):
        '''Wrap a transport, trans, with the TZlibTransport
compressed transport class, returning a new
transport to the caller.
@param compresslevel: The zlib compression level, ranging
from 0 (no compression) to 9 (best compression). Defaults to 9.
@type compresslevel: int
This method returns a TZlibTransport which wraps the
passed C{trans} TTransport derived instance.
'''
if trans == self._last_trans:
return self._last_z
ztrans = TZlibTransport(trans, compresslevel)
self._last_trans = trans
self._last_z = ztrans
return ztrans
class TZlibTransport(TTransportBase, CReadableTransport):
'''
Class that wraps a transport with zlib, compressing writes
and decompresses reads, using the python standard
library zlib module.
'''
# Read buffer size for the python fastbinary C extension,
# the TBinaryProtocolAccelerated class.
DEFAULT_BUFFSIZE = 4096
def __init__(self, trans, compresslevel=9):
'''
Create a new TZlibTransport, wrapping C{trans}, another
TTransport derived object.
@param trans: A thrift transport object, i.e. a TSocket() object.
@type trans: TTransport
@param compresslevel: The zlib compression level, ranging
from 0 (no compression) to 9 (best compression). Default is 9.
@type compresslevel: int
'''
self.__trans = trans
self.compresslevel = compresslevel
        self.__rbuf = BytesIO()
        self.__wbuf = BytesIO()
self._init_zlib()
self._init_stats()
    def _reinit_buffers(self):
        '''
        Internal method to initialize/reset the internal BytesIO objects
        for read and write buffers.
        '''
        self.__rbuf = BytesIO()
        self.__wbuf = BytesIO()
def _init_stats(self):
'''
Internal method to reset the internal statistics counters
for compression ratios and bandwidth savings.
'''
self.bytes_in = 0
self.bytes_out = 0
self.bytes_in_comp = 0
self.bytes_out_comp = 0
def _init_zlib(self):
'''
Internal method for setting up the zlib compression and
decompression objects.
'''
self._zcomp_read = zlib.decompressobj()
self._zcomp_write = zlib.compressobj(self.compresslevel)
def getCompRatio(self):
'''
Get the current measured compression ratios (in,out) from
this transport.
Returns a tuple of:
(inbound_compression_ratio, outbound_compression_ratio)
The compression ratios are computed as:
compressed / uncompressed
E.g., data that compresses by 10x will have a ratio of: 0.10
        and data that compresses to half of its original size will
have a ratio of 0.5
None is returned if no bytes have yet been processed in
a particular direction.
'''
r_percent, w_percent = (None, None)
if self.bytes_in > 0:
r_percent = self.bytes_in_comp / self.bytes_in
if self.bytes_out > 0:
w_percent = self.bytes_out_comp / self.bytes_out
return (r_percent, w_percent)
def getCompSavings(self):
'''
Get the current count of saved bytes due to data
compression.
Returns a tuple of:
(inbound_saved_bytes, outbound_saved_bytes)
Note: if compression is actually expanding your
data (only likely with very tiny thrift objects), then
the values returned will be negative.
'''
r_saved = self.bytes_in - self.bytes_in_comp
w_saved = self.bytes_out - self.bytes_out_comp
return (r_saved, w_saved)
def isOpen(self):
'''Return the underlying transport's open status'''
return self.__trans.isOpen()
def open(self):
"""Open the underlying transport"""
self._init_stats()
return self.__trans.open()
def listen(self):
'''Invoke the underlying transport's listen() method'''
self.__trans.listen()
def accept(self):
'''Accept connections on the underlying transport'''
return self.__trans.accept()
def close(self):
        '''Close the underlying transport.'''
self._reinit_buffers()
self._init_zlib()
return self.__trans.close()
def read(self, sz):
'''
Read up to sz bytes from the decompressed bytes buffer, and
read from the underlying transport if the decompression
buffer is empty.
'''
ret = self.__rbuf.read(sz)
if len(ret) > 0:
return ret
# keep reading from transport until something comes back
while True:
if self.readComp(sz):
break
ret = self.__rbuf.read(sz)
return ret
def readComp(self, sz):
'''
Read compressed data from the underlying transport, then
        decompress it and append it to the internal BytesIO read buffer
'''
zbuf = self.__trans.read(sz)
zbuf = self._zcomp_read.unconsumed_tail + zbuf
buf = self._zcomp_read.decompress(zbuf)
self.bytes_in += len(zbuf)
self.bytes_in_comp += len(buf)
old = self.__rbuf.read()
        self.__rbuf = BytesIO(old + buf)
if len(old) + len(buf) == 0:
return False
return True
def write(self, buf):
'''
Write some bytes, putting them into the internal write
buffer for eventual compression.
'''
self.__wbuf.write(buf)
def flush(self):
'''
Flush any queued up data in the write buffer and ensure the
compression buffer is flushed out to the underlying transport
'''
wout = self.__wbuf.getvalue()
if len(wout) > 0:
zbuf = self._zcomp_write.compress(wout)
self.bytes_out += len(wout)
self.bytes_out_comp += len(zbuf)
else:
            zbuf = b''
ztail = self._zcomp_write.flush(zlib.Z_SYNC_FLUSH)
self.bytes_out_comp += len(ztail)
if (len(zbuf) + len(ztail)) > 0:
            self.__wbuf = BytesIO()
self.__trans.write(zbuf + ztail)
self.__trans.flush()
@property
def cstringio_buf(self):
'''Implement the CReadableTransport interface'''
return self.__rbuf
def cstringio_refill(self, partialread, reqlen):
'''Implement the CReadableTransport interface for refill'''
retstring = partialread
if reqlen < self.DEFAULT_BUFFSIZE:
retstring += self.read(self.DEFAULT_BUFFSIZE)
while len(retstring) < reqlen:
retstring += self.read(reqlen - len(retstring))
        self.__rbuf = BytesIO(retstring)
return self.__rbuf
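# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module). It assumes an in-memory TMemoryBuffer from the standard Thrift
# TTransport module as the underlying transport; any TTransportBase-derived
# transport (e.g. a TSocket) would be wrapped the same way.
#
#     raw = TMemoryBuffer()
#     factory = TZlibTransportFactory()
#     ztrans = factory.getTransport(raw, compresslevel=6)
#     ztrans.write(b'hello zlib transport' * 100)
#     ztrans.flush()                     # compresses and writes to `raw`
#     print(ztrans.getCompRatio())       # (read_ratio, write_ratio)
#     print(ztrans.getCompSavings())     # (read_bytes_saved, write_bytes_saved)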
|
the-stack_106_16503
|
from __future__ import annotations
import numpy as np
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from torch.cuda import is_available
from model import helper_functions as help_fn
class SEGMpredictor:
def __init__(self, model_path: str):
cfg = get_cfg()
cfg.merge_from_file(
model_zoo.get_config_file(
"COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"
)
)
cfg.MODEL.WEIGHTS = model_path
cfg.TEST.EVAL_PERIOD = 1000
cfg.INPUT.MIN_SIZE_TRAIN = 2160
cfg.INPUT.MAX_SIZE_TRAIN = 3130
cfg.INPUT.MIN_SIZE_TEST = 2160
cfg.INPUT.MAX_SIZE_TEST = 3130
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.1
cfg.INPUT.FORMAT = "BGR"
cfg.DATALOADER.NUM_WORKERS = 4
cfg.SOLVER.IMS_PER_BATCH = 3
cfg.SOLVER.BASE_LR = 0.01
cfg.SOLVER.GAMMA = 0.1
cfg.SOLVER.STEPS = (1500,)
if not is_available():
cfg.MODEL.DEVICE = "cpu"
cfg.SOLVER.MAX_ITER = 17000
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
cfg.SOLVER.CHECKPOINT_PERIOD = cfg.TEST.EVAL_PERIOD
cfg.TEST.DETECTIONS_PER_IMAGE = 1000
cfg.OUTPUT_DIR = "./output"
self.predictor = DefaultPredictor(cfg)
def __call__(self, img: np.ndarray) -> list[np.ndarray]:
outputs = self.predictor(img)
prediction = outputs["instances"].pred_masks.cpu().numpy()
contours = []
for pred in prediction:
contour_list = help_fn.get_contours_from_mask(pred)
contours.append(help_fn.get_larger_contour(contour_list))
return contours
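# Minimal usage sketch (added for illustration; not part of the original
# module). The weights path and image file are placeholders, and cv2 is only
# assumed here for loading a BGR image, matching cfg.INPUT.FORMAT above.
#
#     import cv2
#     predictor = SEGMpredictor(model_path="output/model_final.pth")
#     img = cv2.imread("sample.jpg")      # BGR ndarray
#     contours = predictor(img)           # one largest contour per predicted instance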
|
the-stack_106_16504
|
from RGT.XML.SVG.basicSvgNode import BasicSvgNode
from RGT.XML.SVG.Attribs.positionAttributes import PositionAttributes
from types import StringType
class FePointLightNode(BasicSvgNode, PositionAttributes):
svgNodeType = BasicSvgNode.SVG_FE_POINT_LIGHT_NODE
ATTRIBUTE_Z = 'z'
def __init__(self, ownerDoc, x=None, y=None, z=None):
BasicSvgNode.__init__(self, ownerDoc, 'fePointLight')
PositionAttributes.__init__(self)
self.setX(x)
self.setY(y)
self.setZ(z)
self._allowedSvgChildNodes.update({self.SVG_ANIMATE_NODE, self.SVG_SET_NODE})
def setZ(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_Z, data)
def getZ(self):
node = self._getNodeAttribute(self.ATTRIBUTE_Z)
if node is not None:
return node.nodeValue
return None
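# Minimal usage sketch (added for illustration; not part of the original
# module). It assumes `doc` is an owner document as used by the other
# RGT.XML.SVG node classes.
#
#     light = FePointLightNode(doc, x=10, y=20, z=30)
#     print(light.getZ())   # -> '30'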
|
the-stack_106_16505
|
# TestSwiftExpressionsInMethodsPureSwift.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Tests simple swift expressions
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestExpressionsInSwiftMethodsPureSwift(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
def check_expression(self, expression, expected_result, use_summary=True):
value = self.frame().EvaluateExpression(expression)
        self.assertTrue(value.IsValid(), expression + " returned a valid value")
if use_summary:
answer = value.GetSummary()
else:
answer = value.GetValue()
report_str = "%s expected: %s got: %s" % (
expression, expected_result, answer)
self.assertTrue(answer == expected_result, report_str)
@swiftTest
def test_expressions_in_methods(self):
"""Tests that we can run simple Swift expressions correctly"""
self.build()
lldbutil.run_to_source_breakpoint(
self, 'Stop here in Pure Swift class', lldb.SBFileSpec('main.swift'))
self.check_expression("m_computed_ivar == 5", "true")
self.check_expression("m_ivar", "10", use_summary=False)
self.check_expression("self.m_ivar == 11", "false")
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
|
the-stack_106_16506
|
from typing import List
class Solution:
def solveNQueens(self, n: int) -> List[List[str]]:
def backTrack(i):
nonlocal col_set, lc_set, rc_set, path, res
for j in range(n):
if (j in col_set) or ((i + j) in lc_set) or ((i - j) in rc_set):
continue
col_set.add(j)
lc_set.add(i + j)
rc_set.add(i - j)
path.append((i, j))
if i == n-1:
tmp = []
for item in path:
tmp.append("."*item[1]+'Q'+'.'*(i-item[1]))
res.append(tmp[:])
# res.append(path[:])
# return
backTrack(i + 1)
path.pop()
col_set.discard(j)
lc_set.discard(i + j)
rc_set.discard(i - j)
col_set = set()
lc_set = set()
rc_set = set()
res = []
path = []
backTrack(0)
return res
print(Solution().solveNQueens(4))
|
the-stack_106_16509
|
# This workload tests submitting many actor methods.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import ray
from ray.cluster_utils import Cluster
num_redis_shards = 5
redis_max_memory = 10**8
object_store_memory = 10**8
num_nodes = 10
message = ("Make sure there is enough memory on this machine to run this "
"workload. We divide the system memory by 2 to provide a buffer.")
assert (num_nodes * object_store_memory + num_redis_shards * redis_max_memory <
ray.utils.get_system_memory() / 2), message
# Simulate a cluster on one machine.
cluster = Cluster()
for i in range(num_nodes):
cluster.add_node(
redis_port=6379 if i == 0 else None,
num_redis_shards=num_redis_shards if i == 0 else None,
num_cpus=5,
num_gpus=0,
resources={str(i): 2},
object_store_memory=object_store_memory,
redis_max_memory=redis_max_memory)
ray.init(address=cluster.address)
# Run the workload.
@ray.remote
class Actor(object):
def __init__(self):
self.value = 0
def method(self):
self.value += 1
actors = [
Actor._remote([], {}, num_cpus=0.1, resources={str(i % num_nodes): 0.1})
for i in range(num_nodes * 5)
]
iteration = 0
start_time = time.time()
previous_time = start_time
while True:
for _ in range(100):
previous_ids = [a.method.remote() for a in actors]
ray.get(previous_ids)
new_time = time.time()
print("Iteration {}:\n"
" - Iteration time: {}.\n"
" - Absolute time: {}.\n"
" - Total elapsed time: {}.".format(
iteration, new_time - previous_time, new_time,
new_time - start_time))
previous_time = new_time
iteration += 1
|
the-stack_106_16512
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from main.models import SettingModel
from studies.models import Study
class Intervention(SettingModel):
# This is used internally to provide backwards compatibility with the old version of this model. All old fields are
# still used if this is 1.
version = models.PositiveIntegerField('INTERNAL - Describes which version of the intervention model is used', default=2)
period = models.TextField(
_('Wat is de periode waarbinnen de interventie plaatsvindt?'),
help_text=_('De interventie vindt plaats binnen het schooljaar '
'2018-2019'),
blank=True,
)
multiple_sessions = models.BooleanField(
_('Zal de interventie vaker dan één keer plaatsvinden?'),
default=False,
)
session_frequency = models.TextField(
_('Wat is de frequentie van de interventie?'),
blank=True,
)
duration = models.PositiveIntegerField(
_('Wat is de duur van de interventie per sessie in minuten?'),
blank=True,
null=True,
)
experimenter = models.TextField(
_('Wie voert de interventie uit? Leg uit wat de rol en de functie van de persoon is: '),
blank=True,
)
description = models.TextField(
_('Geef een beschrijving van de experimentele interventie'),
blank=True,
)
has_controls = models.BooleanField(
_('Is er sprake van een controlegroep?'),
default=False,
)
controls_description = models.TextField(
_('Geef een beschrijving van de controleinterventie'),
blank=True,
)
measurement = models.TextField(
_('Hoe wordt het effect van de interventie gemeten?'),
help_text=_('Wanneer u de deelnemer extra taken laat uitvoeren, \
dus een taak die niet behoort tot het reguliere onderwijspakket, dan moet \
u op de vorige pagina ook "takenonderzoek" aanvinken.'),
blank=True,
)
extra_task = models.BooleanField(
_('Voert de leerling nog een taak uit die niet onder het leerplan valt?'),
help_text=_('Moet het nog een taak doen, zoals het invullen van een (onderzoeks)vragenlijst, die niet binnen de interventie zelf valt?'),
default=False,
)
# Legacy, not used in version 2 of the form
amount_per_week = models.PositiveIntegerField(
_('Hoe vaak per week vindt de interventiesessie plaats?'),
blank=True,
default=1,
)
# References
study = models.OneToOneField(
Study,
on_delete=models.CASCADE,
)
|
the-stack_106_16513
|
from django.db import models
from dcpython.app.models import ServiceSync
import feedparser
from dcpython.events.models import Event
from django.conf import settings
class PlaylistManager(models.Manager):
def sync(self, url=None):
url = url or settings.YOUTUBE_PLAYLIST_FEED
feed = feedparser.parse(url)
try:
last_synced = ServiceSync.objects.get(service=url)
        except ServiceSync.DoesNotExist:
last_synced = None
else:
last_synced = last_synced.last_synced
if feed.feed.updated == last_synced:
return
for entry in feed.entries:
if not entry.summary:
continue
try:
event = Event.objects.get(meetup_url=entry.summary)
except Event.DoesNotExist:
continue
remote_id = entry.id.split('/')[-1]
defaults = {
'event': event,
'updated': entry.updated,
}
playlist, created = Playlist.objects.get_or_create(remote_id=remote_id, defaults=defaults)
if created:
continue
if playlist.updated == entry.updated:
continue
playlist.event = event
playlist.updated = entry.updated
playlist.save()
last_synced, created = ServiceSync.objects.get_or_create(service=url, defaults={'last_synced': feed.feed.updated})
if not created:
last_synced.last_synced = feed.feed.updated
last_synced.save()
class Playlist(models.Model):
event = models.ForeignKey(Event, related_name='playlists')
remote_id = models.CharField(max_length=100, blank=True, null=True)
updated = models.CharField(max_length=30)
objects = PlaylistManager()
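# Usage sketch (added for illustration; not part of the original module):
# sync() is typically called from a periodic task or a management command,
# e.g.
#     Playlist.objects.sync()              # uses settings.YOUTUBE_PLAYLIST_FEED
#     Playlist.objects.sync(url=feed_url)  # or an explicit feed URL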
|
the-stack_106_16515
|
#! /usr/bin/python3
# Filled orders may not be re‐opened, so only orders not involving BTC (and so
# which cannot have expired order matches) may be filled.
import struct
import decimal
D = decimal.Decimal
import logging
from lib import (config, exceptions, bitcoin, util)
FORMAT = '>QQQQHQ'
LENGTH = 8 + 8 + 8 + 8 + 2 + 8
ID = 10
def exact_penalty (db, address, block_index, order_match_id):
# Penalize addresses that don’t make BTC payments. If an address lets an
# order match expire, expire sell BTC orders from that address.
cursor = db.cursor()
# Orders.
bad_orders = list(cursor.execute('''SELECT * FROM orders \
WHERE (source = ? AND give_asset = ? AND status = ?)''',
(address, config.BTC, 'open')))
for bad_order in bad_orders:
cancel_order(db, bad_order, 'expired', block_index)
if not (block_index >= 314250 or config.TESTNET): # Protocol change.
# Order matches.
bad_order_matches = list(cursor.execute('''SELECT * FROM order_matches \
WHERE ((tx0_address = ? AND forward_asset = ?) OR (tx1_address = ? AND backward_asset = ?)) AND (status = ?)''',
(address, config.BTC, address, config.BTC, 'pending')))
for bad_order_match in bad_order_matches:
cancel_order_match(db, bad_order_match, 'expired', block_index)
cursor.close()
return
def cancel_order (db, order, status, block_index):
cursor = db.cursor()
# Update status of order.
bindings = {
'status': status,
'tx_hash': order['tx_hash']
}
sql='update orders set status = :status where tx_hash = :tx_hash'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'orders', bindings)
if order['give_asset'] != config.BTC: # Can’t credit BTC.
util.credit(db, block_index, order['source'], order['give_asset'], order['give_remaining'], action='cancel order', event=order['tx_hash'])
if status == 'expired':
# Record offer expiration.
bindings = {
'order_index': order['tx_index'],
'order_hash': order['tx_hash'],
'source': order['source'],
'block_index': block_index
}
sql='insert into order_expirations values(:order_index, :order_hash, :source, :block_index)'
cursor.execute(sql, bindings)
cursor.close()
def cancel_order_match (db, order_match, status, block_index):
'''
May only be cancelled by callbacks.'''
cursor = db.cursor()
# Skip order matches just expired as a penalty. (Not very efficient.)
if not (block_index >= 314250 or config.TESTNET): # Protocol change.
order_matches = list(cursor.execute('''SELECT * FROM order_matches \
WHERE (id = ? AND status = ?)''',
(order_match['id'], 'expired')))
if order_matches:
cursor.close()
return
# Update status of order match.
bindings = {
'status': status,
'order_match_id': order_match['id']
}
sql='update order_matches set status = :status where id = :order_match_id'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'order_matches', bindings)
order_match_id = order_match['tx0_hash'] + order_match['tx1_hash']
# If tx0 is dead, credit address directly; if not, replenish give remaining, get remaining, and fee required remaining.
orders = list(cursor.execute('''SELECT * FROM orders \
WHERE tx_index = ?''',
(order_match['tx0_index'],)))
assert len(orders) == 1
tx0_order = orders[0]
if tx0_order['status'] in ('expired', 'cancelled'):
tx0_order_status = tx0_order['status']
if order_match['forward_asset'] != config.BTC:
util.credit(db, block_index, order_match['tx0_address'],
order_match['forward_asset'],
order_match['forward_quantity'], action='order {}'.format(tx0_order_status), event=order_match['id'])
else:
tx0_give_remaining = tx0_order['give_remaining'] + order_match['forward_quantity']
tx0_get_remaining = tx0_order['get_remaining'] + order_match['backward_quantity']
if tx0_order['get_asset'] == config.BTC and (block_index >= 297000 or config.TESTNET): # Protocol change.
tx0_fee_required_remaining = tx0_order['fee_required_remaining'] + order_match['fee_paid']
else:
tx0_fee_required_remaining = tx0_order['fee_required_remaining']
tx0_order_status = tx0_order['status']
bindings = {
'give_remaining': tx0_give_remaining,
'get_remaining': tx0_get_remaining,
'status': tx0_order_status,
'fee_required_remaining': tx0_fee_required_remaining,
'tx_hash': order_match['tx0_hash']
}
sql='update orders set give_remaining = :give_remaining, get_remaining = :get_remaining, fee_required_remaining = :fee_required_remaining where tx_hash = :tx_hash'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'orders', bindings)
# If tx1 is dead, credit address directly; if not, replenish give remaining, get remaining, and fee required remaining.
orders = list(cursor.execute('''SELECT * FROM orders \
WHERE tx_index = ?''',
(order_match['tx1_index'],)))
assert len(orders) == 1
tx1_order = orders[0]
if tx1_order['status'] in ('expired', 'cancelled'):
tx1_order_status = tx1_order['status']
if order_match['backward_asset'] != config.BTC:
util.credit(db, block_index, order_match['tx1_address'],
order_match['backward_asset'],
order_match['backward_quantity'], action='order {}'.format(tx1_order_status), event=order_match['id'])
else:
tx1_give_remaining = tx1_order['give_remaining'] + order_match['backward_quantity']
tx1_get_remaining = tx1_order['get_remaining'] + order_match['forward_quantity']
if tx1_order['get_asset'] == config.BTC and (block_index >= 297000 or config.TESTNET): # Protocol change.
tx1_fee_required_remaining = tx1_order['fee_required_remaining'] + order_match['fee_paid']
else:
tx1_fee_required_remaining = tx1_order['fee_required_remaining']
tx1_order_status = tx1_order['status']
bindings = {
'give_remaining': tx1_give_remaining,
'get_remaining': tx1_get_remaining,
'status': tx1_order_status,
'fee_required_remaining': tx1_fee_required_remaining,
'tx_hash': order_match['tx1_hash']
}
sql='update orders set give_remaining = :give_remaining, get_remaining = :get_remaining, fee_required_remaining = :fee_required_remaining where tx_hash = :tx_hash'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'orders', bindings)
if block_index < 286500: # Protocol change.
# Sanity check: one of the two must have expired.
tx0_order_time_left = tx0_order['expire_index'] - block_index
tx1_order_time_left = tx1_order['expire_index'] - block_index
assert tx0_order_time_left or tx1_order_time_left
# Penalize tardiness.
if block_index >= 313900 or config.TESTNET: # Protocol change.
if tx0_order['status'] == 'expired' and order_match['forward_asset'] == config.BTC:
exact_penalty(db, order_match['tx0_address'], block_index, order_match['id'])
if tx1_order['status'] == 'expired' and order_match['backward_asset'] == config.BTC:
exact_penalty(db, order_match['tx1_address'], block_index, order_match['id'])
# Re‐match.
if block_index >= 310000 or config.TESTNET: # Protocol change.
if not (block_index >= 315000 or config.TESTNET): # Protocol change.
cursor.execute('''SELECT * FROM transactions\
WHERE tx_hash = ?''', (tx0_order['tx_hash'],))
match(db, list(cursor)[0], block_index)
cursor.execute('''SELECT * FROM transactions\
WHERE tx_hash = ?''', (tx1_order['tx_hash'],))
match(db, list(cursor)[0], block_index)
if status == 'expired':
# Record order match expiration.
bindings = {
'order_match_id': order_match['id'],
'tx0_address': order_match['tx0_address'],
'tx1_address': order_match['tx1_address'],
'block_index': block_index
}
sql='insert into order_match_expirations values(:order_match_id, :tx0_address, :tx1_address, :block_index)'
cursor.execute(sql, bindings)
cursor.close()
def validate (db, source, give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required, block_index):
problems = []
cursor = db.cursor()
if give_asset == config.BTC and get_asset == config.BTC:
problems.append('cannot trade {} for itself'.format(config.BTC))
if not isinstance(give_quantity, int):
problems.append('give_quantity must be in satoshis')
return problems
if not isinstance(get_quantity, int):
problems.append('get_quantity must be in satoshis')
return problems
if not isinstance(fee_required, int):
problems.append('fee_required must be in satoshis')
return problems
if not isinstance(expiration, int):
problems.append('expiration must be expressed as an integer block delta')
return problems
if give_quantity <= 0: problems.append('non‐positive give quantity')
if get_quantity <= 0: problems.append('non‐positive get quantity')
if fee_required < 0: problems.append('negative fee_required')
if expiration < 0: problems.append('negative expiration')
if expiration == 0 and not (block_index >= 317500 or config.TESTNET): # Protocol change.
problems.append('zero expiration')
if not give_quantity or not get_quantity:
problems.append('zero give or zero get')
cursor.execute('select * from issuances where (status = ? and asset = ?)', ('valid', give_asset))
if give_asset not in (config.BTC, config.XCP) and not cursor.fetchall():
problems.append('no such asset to give ({})'.format(give_asset))
cursor.execute('select * from issuances where (status = ? and asset = ?)', ('valid', get_asset))
if get_asset not in (config.BTC, config.XCP) and not cursor.fetchall():
problems.append('no such asset to get ({})'.format(get_asset))
if expiration > config.MAX_EXPIRATION:
problems.append('expiration overflow')
# For SQLite3
if give_quantity > config.MAX_INT or get_quantity > config.MAX_INT or fee_required > config.MAX_INT:
problems.append('integer overflow')
cursor.close()
return problems
def compose (db, source, give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required):
cursor = db.cursor()
# Check balance.
if give_asset == config.BTC:
if bitcoin.get_btc_balance(source) * config.UNIT < give_quantity:
logging.warning('WARNING: insufficient funds for {}pay.'.format(config.BTC))
else:
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, give_asset)))
if (not balances or balances[0]['quantity'] < give_quantity):
raise exceptions.ComposeError('insufficient funds')
problems = validate(db, source, give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required, util.last_block(db)['block_index'])
if problems: raise exceptions.ComposeError(problems)
give_id = util.get_asset_id(give_asset, util.last_block(db)['block_index'])
get_id = util.get_asset_id(get_asset, util.last_block(db)['block_index'])
data = struct.pack(config.TXTYPE_FORMAT, ID)
data += struct.pack(FORMAT, give_id, give_quantity, get_id, get_quantity,
expiration, fee_required)
cursor.close()
return (source, [], data)
def parse (db, tx, message):
order_parse_cursor = db.cursor()
# Unpack message.
try:
if len(message) != LENGTH:
raise exceptions.UnpackError
give_id, give_quantity, get_id, get_quantity, expiration, fee_required = struct.unpack(FORMAT, message)
give_asset = util.get_asset_name(give_id, tx['block_index'])
get_asset = util.get_asset_name(get_id, tx['block_index'])
status = 'open'
except (exceptions.UnpackError, exceptions.AssetNameError, struct.error) as e:
give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required = 0, 0, 0, 0, 0, 0
status = 'invalid: could not unpack'
price = 0
if status == 'open':
try:
price = util.price(get_quantity, give_quantity, tx['block_index'])
except ZeroDivisionError:
price = 0
# Overorder
order_parse_cursor.execute('''SELECT * FROM balances \
WHERE (address = ? AND asset = ?)''', (tx['source'], give_asset))
balances = list(order_parse_cursor)
if give_asset != config.BTC:
if not balances:
give_quantity = 0
else:
balance = balances[0]['quantity']
if balance < give_quantity:
give_quantity = balance
get_quantity = int(price * give_quantity)
problems = validate(db, tx['source'], give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required, tx['block_index'])
if problems: status = 'invalid: ' + '; '.join(problems)
# Debit give quantity. (Escrow.)
if status == 'open':
if give_asset != config.BTC: # No need (or way) to debit BTC.
util.debit(db, tx['block_index'], tx['source'], give_asset, give_quantity, action='open order', event=tx['tx_hash'])
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'give_asset': give_asset,
'give_quantity': give_quantity,
'give_remaining': give_quantity,
'get_asset': get_asset,
'get_quantity': get_quantity,
'get_remaining': get_quantity,
'expiration': expiration,
'expire_index': tx['block_index'] + expiration,
'fee_required': fee_required,
'fee_required_remaining': fee_required,
'fee_provided': tx['fee'],
'fee_provided_remaining': tx['fee'],
'status': status,
}
sql='insert into orders values(:tx_index, :tx_hash, :block_index, :source, :give_asset, :give_quantity, :give_remaining, :get_asset, :get_quantity, :get_remaining, :expiration, :expire_index, :fee_required, :fee_required_remaining, :fee_provided, :fee_provided_remaining, :status)'
order_parse_cursor.execute(sql, bindings)
# Match.
if status == 'open' and tx['block_index'] != config.MEMPOOL_BLOCK_INDEX:
match(db, tx)
order_parse_cursor.close()
def match (db, tx, block_index=None):
cursor = db.cursor()
# Get order in question.
orders = list(cursor.execute('''SELECT * FROM orders\
WHERE (tx_index = ? AND status = ?)''', (tx['tx_index'], 'open')))
if not orders:
cursor.close()
return
else:
assert len(orders) == 1
tx1 = orders[0]
cursor.execute('''SELECT * FROM orders \
WHERE (give_asset=? AND get_asset=? AND status=? AND tx_hash != ?)''',
(tx1['get_asset'], tx1['give_asset'], 'open', tx1['tx_hash']))
tx1_give_remaining = tx1['give_remaining']
tx1_get_remaining = tx1['get_remaining']
order_matches = cursor.fetchall()
if tx['block_index'] > 284500 or config.TESTNET: # Protocol change.
order_matches = sorted(order_matches, key=lambda x: x['tx_index']) # Sort by tx index second.
order_matches = sorted(order_matches, key=lambda x: util.price(x['get_quantity'], x['give_quantity'], tx1['block_index'])) # Sort by price first.
# Get fee remaining.
tx1_fee_required_remaining = tx1['fee_required_remaining']
tx1_fee_provided_remaining = tx1['fee_provided_remaining']
tx1_status = tx1['status']
for tx0 in order_matches:
order_match_id = tx0['tx_hash'] + tx1['tx_hash']
if not block_index:
block_index = max(tx0['block_index'], tx1['block_index'])
if tx1_status != 'open': break
logging.debug('Considering: ' + tx0['tx_hash'])
tx0_give_remaining = tx0['give_remaining']
tx0_get_remaining = tx0['get_remaining']
# Ignore previous matches. (Both directions, just to be sure.)
cursor.execute('''SELECT * FROM order_matches
WHERE id = ? ''', (tx0['tx_hash'] + tx1['tx_hash'], ))
if list(cursor):
logging.debug('Skipping: previous match')
continue
cursor.execute('''SELECT * FROM order_matches
WHERE id = ? ''', (tx1['tx_hash'] + tx0['tx_hash'], ))
if list(cursor):
logging.debug('Skipping: previous match')
continue
# Get fee provided remaining.
tx0_fee_required_remaining = tx0['fee_required_remaining']
tx0_fee_provided_remaining = tx0['fee_provided_remaining']
        # Make sure that both orders still have funds remaining (if order involves BTC, and so cannot be ‘filled’).
if tx0['give_asset'] == config.BTC or tx0['get_asset'] == config.BTC: # Gratuitous
if tx0_give_remaining <= 0 or tx1_give_remaining <= 0:
logging.debug('Skipping: negative give quantity remaining')
continue
if block_index >= 292000 and block_index <= 310500 and not config.TESTNET: # Protocol changes
if tx0_get_remaining <= 0 or tx1_get_remaining <= 0:
logging.debug('Skipping: negative get quantity remaining')
continue
if block_index >= 294000 or config.TESTNET: # Protocol change.
if tx0['fee_required_remaining'] < 0:
logging.debug('Skipping: negative tx0 fee required remaining')
continue
if tx0['fee_provided_remaining'] < 0:
logging.debug('Skipping: negative tx0 fee provided remaining')
continue
if tx1_fee_provided_remaining < 0:
logging.debug('Skipping: negative tx1 fee provided remaining')
continue
if tx1_fee_required_remaining < 0:
logging.debug('Skipping: negative tx1 fee required remaining')
continue
# If the prices agree, make the trade. The found order sets the price,
# and they trade as much as they can.
tx0_price = util.price(tx0['get_quantity'], tx0['give_quantity'], block_index)
tx1_price = util.price(tx1['get_quantity'], tx1['give_quantity'], block_index)
tx1_inverse_price = util.price(tx1['give_quantity'], tx1['get_quantity'], block_index)
# Protocol change.
if tx['block_index'] < 286000: tx1_inverse_price = util.price(1, tx1_price, block_index)
logging.debug('Tx0 Price: {}; Tx1 Inverse Price: {}'.format(float(tx0_price), float(tx1_inverse_price)))
if tx0_price > tx1_inverse_price:
logging.debug('Skipping: price mismatch.')
else:
logging.debug('Potential forward quantities: {}, {}'.format(tx0_give_remaining, int(util.price(tx1_give_remaining, tx0_price, block_index))))
forward_quantity = int(min(tx0_give_remaining, int(util.price(tx1_give_remaining, tx0_price, block_index))))
logging.debug('Forward Quantity: {}'.format(forward_quantity))
backward_quantity = round(forward_quantity * tx0_price)
logging.debug('Backward Quantity: {}'.format(backward_quantity))
if not forward_quantity:
logging.debug('Skipping: zero forward quantity.')
continue
if block_index >= 286500 or config.TESTNET: # Protocol change.
if not backward_quantity:
logging.debug('Skipping: zero backward quantity.')
continue
forward_asset, backward_asset = tx1['get_asset'], tx1['give_asset']
if block_index >= 313900 or config.TESTNET: # Protocol change.
min_btc_quantity = 0.001 * config.UNIT # 0.001 BTC
if (forward_asset == config.BTC and forward_quantity <= min_btc_quantity) or (backward_asset == config.BTC and backward_quantity <= min_btc_quantity):
logging.debug('Skipping: below minimum {} quantity'.format(config.BTC))
continue
# Check and update fee remainings.
fee = 0
if block_index >= 286500 or config.TESTNET: # Protocol change. Deduct fee_required from provided_remaining, etc., if possible (else don’t match).
if tx1['get_asset'] == config.BTC:
if block_index >= 310500 or config.TESTNET: # Protocol change.
fee = int(tx1['fee_required'] * util.price(backward_quantity, tx1['give_quantity'], block_index))
else:
fee = int(tx1['fee_required_remaining'] * util.price(forward_quantity, tx1_get_remaining, block_index))
logging.debug('Tx0 fee provided remaining: {}; required fee: {}'.format(tx0_fee_provided_remaining / config.UNIT, fee / config.UNIT))
if tx0_fee_provided_remaining < fee:
logging.debug('Skipping: tx0 fee provided remaining is too low.')
continue
else:
tx0_fee_provided_remaining -= fee
if block_index >= 287800 or config.TESTNET: # Protocol change.
tx1_fee_required_remaining -= fee
elif tx1['give_asset'] == config.BTC:
if block_index >= 310500 or config.TESTNET: # Protocol change.
fee = int(tx0['fee_required'] * util.price(backward_quantity, tx0['give_quantity'], block_index))
else:
fee = int(tx0['fee_required_remaining'] * util.price(backward_quantity, tx0_get_remaining, block_index))
logging.debug('Tx1 fee provided remaining: {}; required fee: {}'.format(tx1_fee_provided_remaining / config.UNIT, fee / config.UNIT))
if tx1_fee_provided_remaining < fee:
logging.debug('Skipping: tx1 fee provided remaining is too low.')
continue
else:
tx1_fee_provided_remaining -= fee
if block_index >= 287800 or config.TESTNET: # Protocol change.
tx0_fee_required_remaining -= fee
else: # Don’t deduct.
if tx1['get_asset'] == config.BTC:
if tx0_fee_provided_remaining < tx1['fee_required']: continue
elif tx1['give_asset'] == config.BTC:
if tx1_fee_provided_remaining < tx0['fee_required']: continue
if config.BTC in (tx1['give_asset'], tx1['get_asset']):
status = 'pending'
else:
status = 'completed'
# Credit.
util.credit(db, tx['block_index'], tx1['source'], tx1['get_asset'],
forward_quantity, action='order match', event=order_match_id)
util.credit(db, tx['block_index'], tx0['source'], tx0['get_asset'],
backward_quantity, action='order match', event=order_match_id)
# Debit the order, even if it involves giving bitcoins, and so one
# can't debit the sending account.
# Get remainings may be negative.
tx0_give_remaining -= forward_quantity
tx0_get_remaining -= backward_quantity
tx1_give_remaining -= backward_quantity
tx1_get_remaining -= forward_quantity
# Update give_remaining, get_remaining.
# tx0
tx0_status = 'open'
if tx0_give_remaining <= 0 or (tx0_get_remaining <= 0 and (block_index >= 292000 or config.TESTNET)): # Protocol change
if tx0['give_asset'] != config.BTC and tx0['get_asset'] != config.BTC:
# Fill order, and recredit give_remaining.
tx0_status = 'filled'
util.credit(db, block_index, tx0['source'], tx0['give_asset'], tx0_give_remaining, event=tx1['tx_hash'], action='filled')
bindings = {
'give_remaining': tx0_give_remaining,
'get_remaining': tx0_get_remaining,
'fee_required_remaining': tx0_fee_required_remaining,
'fee_provided_remaining': tx0_fee_provided_remaining,
'status': tx0_status,
'tx_hash': tx0['tx_hash']
}
sql='update orders set give_remaining = :give_remaining, get_remaining = :get_remaining, fee_required_remaining = :fee_required_remaining, fee_provided_remaining = :fee_provided_remaining, status = :status where tx_hash = :tx_hash'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'orders', bindings)
# tx1
if tx1_give_remaining <= 0 or (tx1_get_remaining <= 0 and (block_index >= 292000 or config.TESTNET)): # Protocol change
if tx1['give_asset'] != config.BTC and tx1['get_asset'] != config.BTC:
# Fill order, and recredit give_remaining.
tx1_status = 'filled'
util.credit(db, block_index, tx1['source'], tx1['give_asset'], tx1_give_remaining, event=tx0['tx_hash'], action='filled')
bindings = {
'give_remaining': tx1_give_remaining,
'get_remaining': tx1_get_remaining,
'fee_required_remaining': tx1_fee_required_remaining,
'fee_provided_remaining': tx1_fee_provided_remaining,
'status': tx1_status,
'tx_hash': tx1['tx_hash']
}
sql='update orders set give_remaining = :give_remaining, get_remaining = :get_remaining, fee_required_remaining = :fee_required_remaining, fee_provided_remaining = :fee_provided_remaining, status = :status where tx_hash = :tx_hash'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'orders', bindings)
# Calculate when the match will expire.
if block_index >= 308000 or config.TESTNET: # Protocol change.
match_expire_index = block_index + 20
elif block_index >= 286500 or config.TESTNET: # Protocol change.
match_expire_index = block_index + 10
else:
match_expire_index = min(tx0['expire_index'], tx1['expire_index'])
# Record order match.
bindings = {
'id': tx0['tx_hash'] + tx['tx_hash'],
'tx0_index': tx0['tx_index'],
'tx0_hash': tx0['tx_hash'],
'tx0_address': tx0['source'],
'tx1_index': tx1['tx_index'],
'tx1_hash': tx1['tx_hash'],
'tx1_address': tx1['source'],
'forward_asset': forward_asset,
'forward_quantity': forward_quantity,
'backward_asset': backward_asset,
'backward_quantity': backward_quantity,
'tx0_block_index': tx0['block_index'],
'tx1_block_index': tx1['block_index'],
'block_index': block_index,
'tx0_expiration': tx0['expiration'],
'tx1_expiration': tx1['expiration'],
'match_expire_index': match_expire_index,
'fee_paid': fee,
'status': status,
}
sql='insert into order_matches values(:id, :tx0_index, :tx0_hash, :tx0_address, :tx1_index, :tx1_hash, :tx1_address, :forward_asset, :forward_quantity, :backward_asset, :backward_quantity, :tx0_block_index, :tx1_block_index, :block_index, :tx0_expiration, :tx1_expiration, :match_expire_index, :fee_paid, :status)'
cursor.execute(sql, bindings)
if tx1_status == 'filled':
break
cursor.close()
return
def expire (db, block_index):
cursor = db.cursor()
# Expire orders and give refunds for the quantity give_remaining (if non-zero; if not BTC).
cursor.execute('''SELECT * FROM orders \
WHERE (status = ? AND expire_index < ?)''', ('open', block_index))
orders = list(cursor)
for order in orders:
cancel_order(db, order, 'expired', block_index)
# Expire order_matches for BTC with no BTC.
cursor.execute('''SELECT * FROM order_matches \
WHERE (status = ? and match_expire_index < ?)''', ('pending', block_index))
order_matches = list(cursor)
for order_match in order_matches:
cancel_order_match(db, order_match, 'expired', block_index)
if block_index >= 315000 or config.TESTNET: # Protocol change.
# Re‐match.
for order_match in order_matches:
cursor.execute('''SELECT * FROM transactions\
WHERE tx_hash = ?''', (order_match['tx0_hash'],))
match(db, list(cursor)[0], block_index)
cursor.execute('''SELECT * FROM transactions\
WHERE tx_hash = ?''', (order_match['tx1_hash'],))
match(db, list(cursor)[0], block_index)
cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
the-stack_106_16516
|
# FILE INFO ###################################################
# Author: Jason Liu <[email protected]>
# Created on July 2, 2019
# Last Update: Time-stamp: <2019-09-07 09:18:15 liux>
###############################################################
import math, re
__all__ = ["QDIS", 'WelfordStats', "TimeMarks", "DataSeries", "TimeSeries", "DataCollector"]
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class QDIS:
"""Queuing disciplines used by semaphores and resources."""
FIFO = 0 # first in first out
LIFO = 1 # last in first out
SIRO = 2 # service in random order
PRIORITY = 3 # priority based
class WelfordStats(object):
"""Welford's one-pass algorithm to get simple statistics (including
the mean and variance) from a series of data."""
def __init__(self):
self._n = 0
self._mean = 0.0
self._varsum = 0.0
self._max = float('-inf')
self._min = float('inf')
def __len__(self): return self._n
def push(self, x):
"""Add data to the series."""
self._n += 1
if x > self._max: self._max = x
if x < self._min: self._min = x
d = x-self._mean
self._varsum += d*d*(self._n-1)/self._n
self._mean += d/self._n
def min(self): return self._min
def max(self): return self._max
def mean(self): return self._mean
def stdev(self): return math.sqrt(self._varsum/self._n)
def var(self): return self._varsum/self._n
class TimeMarks(object):
"""A series of (increasing) time instances."""
def __init__(self, keep_data=False):
if keep_data: self._data = []
else: self._data = None
self._n = 0
def __len__(self):
"""Return the number of collected samples."""
return self._n
def push(self, t):
if self._n == 0:
self._last = t
elif t < self._last:
errmsg = "timemarks.push(%g) earlier than last entry (%g)" % (t, self._last)
log.error(errmsg)
raise ValueError(errmsg)
if self._data is not None:
self._data.append(t)
self._n += 1
self._last = t
def data(self):
"""Return all samples if keep_data has been set when the timemarks was
initialized; otherwise, return None."""
return self._data
def rate(self, t=None):
"""Return the arrival rate, which is the averge number of sameples up
to the given time. If t is ignored, it's up to the time of the
last entry."""
if self._n > 0:
if t is None: t = self._last
elif t < self._last:
errmsg = "timemarks.rate(t=%g) earlier than last entry (%g)" % (t, self._last)
log.error(errmsg)
raise ValueError(errmsg)
return self._n/t
else:
return 0
class DataSeries(object):
"""A series of numbers."""
def __init__(self, keep_data=False):
if keep_data: self._data = []
else: self._data = None
self._rs = WelfordStats()
def __len__(self):
"""Return the number of collected samples."""
return len(self._rs)
def push(self, d):
if self._data is not None:
self._data.append(d)
self._rs.push(d)
def data(self):
"""Return all samples if keep_data has been set when the dataseries
has been initialized; otherwise, return None."""
return self._data
def mean(self):
"""Return the sample mean."""
if len(self._rs) > 0: return self._rs.mean()
else: return 0
def stdev(self):
"""Return the sample standard deviation."""
if len(self._rs) > 1: return self._rs.stdev()
else: return float('inf')
def var(self):
"""Return the sample variance."""
if len(self._rs) > 1: return self._rs.var()
else: return float('inf')
def min(self):
"""Return the minimum of all samples."""
if len(self._rs) > 0: return self._rs.min()
else: return float('-inf')
def max(self):
"""Return the maximum of all samples."""
if len(self._rs) > 0: return self._rs.max()
else: return float('inf')
class TimeSeries(object):
"""A series of time-value pairs."""
def __init__(self, keep_data=False):
if keep_data: self._data = []
else: self._data = None
self._rs = WelfordStats()
self._area = 0
def __len__(self):
"""Return the number of collected samples."""
return len(self._rs)
def push(self, d):
t, v = d
if len(self._rs) == 0:
self._last_t = t
self._last_v = v
elif t < self._last_t:
errmsg = "timeseries.push(%r) earlier than last entry (%g)" % (d, self._last_t)
log.error(errmsg)
raise ValueError(errmsg)
if self._data is not None:
self._data.append(d)
self._rs.push(v)
self._area += (t-self._last_t)*self._last_v
self._last_t = t
self._last_v = v
def data(self):
"""Return all samples if keep_data has been set when the timeseries
was initialized; otherwise, return None."""
return self._data
def rate(self, t=None):
"""Return the arrival rate, which is the averge number of samples up
to the given time. If t is ignored, it's up to the time of the
last entry."""
if len(self._rs) > 0:
if t is None: t = self._last_t
elif t < self._last_t:
errmsg = "timeseries.rate(t=%g) earlier than last entry (%g)" % (t, self._last_t)
log.error(errmsg)
raise ValueError(errmsg)
return len(self._rs)/t
else:
return 0
def mean(self):
"""Return the sample mean."""
if len(self._rs) > 0: return self._rs.mean()
else: return 0
def stdev(self):
"""Return the sample standard deviation."""
if len(self._rs) > 1: return self._rs.stdev()
else: return float('inf')
def var(self):
"""Return the sample variance."""
if len(self._rs) > 1: return self._rs.var()
else: return float('inf')
def min(self):
"""Return the minimum of all samples."""
if len(self._rs) > 0: return self._rs.min()
else: return float('-inf')
def max(self):
"""Return the maximum of all samples."""
if len(self._rs) > 0: return self._rs.max()
else: return float('inf')
def avg_over_time(self, t=None):
"""Return the average value over time. If t is ignored, it's the
average up to the time of the last entry."""
if len(self._rs) > 0:
if t is None: t = self._last_t
if t < self._last_t:
errmsg = "timeseries.avg_over_time(t=%g) earlier than last entry (%g)" % (t, self._last_t)
log.error(errmsg)
raise ValueError(errmsg)
return (self._area+(t-self._last_t)*self._last_v)/t
else:
return 0
class DataCollector(object):
"""Statistics collection for resources, stores, buckets, and mailboxes."""
def __init__(self, **kwargs):
"""Initialize the data collector. kwargs is the keyworded arguments;
it's a dictionary containing all attributes allowed to be
collected at the corresponding resource or facility."""
log.info("creating data collector:")
self._attrs = kwargs
patterns = {
re.compile(r'timemarks\s*(\(\s*(all)?\s*\))?') : TimeMarks,
re.compile(r'dataseries\s*(\(\s*(all)?\s*\))?') : DataSeries,
re.compile(r'timeseries\s*(\(\s*(all)?\s*\))?') : TimeSeries
}
for k, v in self._attrs.items():
if hasattr(self, k):
errmsg = "datacollector attribute %s already exists" % k
log.error(errmsg)
raise ValueError(errmsg)
for pat, cls in patterns.items():
m = pat.match(v)
if m is not None:
if m.group(2):
v = cls(True)
log.info(" %s: %s(keep_data=True)" % (k, cls.__name__))
else:
v = cls(False)
log.info(" %s: %s(keep_data=False)" % (k, cls.__name__))
setattr(self, k, v)
self._attrs[k] = v
break
else:
errmsg = "datacollector %r has unknown value (%r)" % (k, v)
log.error(errmsg)
raise ValueError(errmsg)
def _sample(self, k, v):
if k in self._attrs:
getattr(self, k).push(v)
def report(self, t=None):
"""Print out the collected statistics nicely. If t is provided, it's
expected to the the simulation end time; if t is ignored, the
statistics are up to the time of the last sample."""
for k, v in self._attrs.items():
if isinstance(v, TimeMarks):
print("%s (timemarks): samples=%d" % (k, len(v)))
if len(v) > 0:
d = v.data()
if d is not None:
print(" data=%r ..." % d[:3])
print(' rate = %g' % v.rate(t))
elif isinstance(v, DataSeries):
print("%s (dataseries): samples=%d" % (k, len(v)))
if len(v) > 0:
d = v.data()
if d is not None:
print(" data=%r ..." % d[:3])
print(' mean = %g' % v.mean())
if len(v) > 1:
print(' stdev = %g' % v.stdev())
print(' var = %g' % v.var())
print(' min = %g' % v.min())
print(' max = %g' % v.max())
elif isinstance(v, TimeSeries):
print("%s (timeseries): samples=%d" % (k, len(v)))
if len(v) > 0:
d = v.data()
if d is not None:
print(" data=%r ..." % d[:3])
print(' rate = %g' % v.rate(t))
print(' mean = %g' % v.mean())
if len(v) > 1:
print(' stdev = %g' % v.stdev())
print(' var = %g' % v.var())
print(' min = %g' % v.min())
print(' max = %g' % v.max())
print(" avg_over_time=%g" % v.avg_over_time(t))
|
the-stack_106_16518
|
"""
Finds the lowest sum of a set of five primes for which any two primes concatenate to produce another prime
Author: Juan Rios
"""
import math
from utils import prime_factors
lower_boundary = 10000
primes = prime_factors(lower_boundary)
primes_mod3_1 = []
primes_mod3_2 = []
for i in primes[1:]:
if i%3==1:
primes_mod3_1.append(i)
elif i%3==2:
primes_mod3_2.append(i)
else:
primes_mod3_1.append(i)
primes_mod3_2.append(i)
primes_index = prime_factors(10**5, False)
"""
Returns 1 if the number is prime, 0 otherwise
"""
def is_prime(number):
    if number < len(primes_index):
        return primes_index[number]
    for p in primes:
        if p > math.sqrt(number):
            return 1
        if number % p == 0:
            return 0
    # No prime below 10000 divides the number; since every concatenation
    # checked here is below 10**8, the trial division above already covers
    # sqrt(number), so the number is prime.
    return 1
def reduce_candidates(candidates):
pivot = candidates[0]
tmp =[]
for p in candidates[1:]:
n1 = int(str(p)+str(pivot))
n2 = int(str(pivot)+str(p))
if is_prime(n1) and is_prime(n2):
tmp.append(p)
return tmp
"""
Returns the number lowest sum of set of five primes
"""
def lowest_prime_sum():
min_sum = float('inf')
primes = primes_mod3_1
for idx0 in range(len(primes)):
prime_candidates0 = reduce_candidates(primes[idx0:])
if primes[idx0]> lower_boundary/5:
break
for idx1 in range(len(prime_candidates0)):
if sum([primes[idx0], prime_candidates0[idx1]])>min_sum:
break
prime_candidates1 = reduce_candidates(prime_candidates0[idx1:])
for idx2 in range(len(prime_candidates1)):
if sum([primes[idx0], prime_candidates0[idx1],prime_candidates1[idx2]])>min_sum:
break
prime_candidates2 = reduce_candidates(prime_candidates1[idx2:])
for idx3 in range(len(prime_candidates2)):
if sum([primes[idx0], prime_candidates0[idx1],prime_candidates1[idx2],prime_candidates2[idx3]])>min_sum:
break
prime_candidates3 = reduce_candidates(prime_candidates2[idx3:])
for idx4 in range(len(prime_candidates3)):
ar = [primes[idx0], prime_candidates0[idx1],prime_candidates1[idx2],prime_candidates2[idx3],prime_candidates3[idx4]]
if min_sum > sum(ar):
min_sum = sum(ar)
print(ar, sum(ar))
else:
break
return min_sum
if __name__ == "__main__":
print('The lowest sum of a set of five primes is {0}'.format(lowest_prime_sum()))
|
the-stack_106_16519
|
from argparse import ArgumentParser
from multiprocessing import Pool
from time import sleep
import requests
import os.path
import json
import sys
def args():
parser = ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('-c', dest='crawl_top', required=False, default=False, type=int, help='Crawl for domains in the top-1m by Alexa. Set how many domains to crawl, for example: 100. Up to 1000000')
group.add_argument('-l', dest='list', required=False, default=False, help='Path to a file containing the DBs names to be checked. One per file')
parser.add_argument('-o', dest='fn', required=False, default='results.json', help='Output file name. Default: results.json')
parser.add_argument('-d', dest='path', required=False, default=False, help="Absolute path to the downloaded HTML file")
parser.add_argument('-p', dest='process', required=False, default=1, type=int, help='How many processes to execute')
parser.add_argument('--dnsdumpster', action='store_true', required=False, default=False, help='Use the DNSDumpster API to gather DBs')
parser.add_argument('--just-v', action='store_true', required=False, default=False, help='Ignore non-vulnerable DBs')
parser.add_argument('--amass', dest='amass', required=False, default=False, help='Path to the output file of an amass scan ([-o] argument)')
if len(sys.argv) == 1:
parser.error("No arguments supplied.")
sys.exit()
return parser.parse_args()
def clean(domain):
'''
    Clean the URLs so they are suitable to be crawled.
'''
if domain.count('http://') == 0:
url = ('https://{}/.json').format(domain)
else:
domain = domain.replace('http', 'https')
url = ('{}.json').format(domain)
return url
def worker(url):
'''
Main function in charge of the bulk of the crawling, it assess a status to
each DB depending on the response.
'''
print('Crawling {} ...'.format(url))
sleep(0.5) #a bit of delay to not abuse in excess the servers
    try:
        r = requests.get(url).json()
    except (requests.exceptions.RequestException, ValueError) as e:
        # Without a parsed JSON response there is nothing to assess, so bail
        # out instead of falling through with `r` undefined.
        print(e)
        return None
try:
if 'error' in r.keys():
if r['error'] == 'Permission denied' and not args_.just_v:
return {'status':-2, 'url':url} #successfully protected
elif r['error'] == '404 Not Found' and not args_.just_v:
return {'status':-1, 'url':url} #doesn't exist
elif not args_.just_v:
return {'status':0, 'url':url} #maybe there's a chance for further explotiation
else:
return {'status':1, 'url':url, 'data':r} #vulnerable
except AttributeError:
'''
Some DBs may just return null
'''
if not args_.just_v:
return {'status':0, 'url':url}
def load_file():
'''
Parse the HTML file with the results of the pentest-tools subdomains scanner.
'''
try:
from bs4 import BeautifulSoup
with open(args_.path, 'r') as f:
print('Gathering subdomains through the downloaded file...')
s = BeautifulSoup(f.read(), 'html.parser')
table = s.find('div', class_='col-xs-12').find('table')
return [row.find('a')['href'] for row in table.find('tbody').find_all('tr')[:-1]]
except IOError as e:
raise e
def down_tops():
'''
Download the specified number of domains in the Alexa's 1M file.
Credits for the script to @evilpacket
https://gist.github.com/evilpacket/3628941
'''
from subprocess import Popen
command = "wget -q http://s3.amazonaws.com/alexa-static/top-1m.csv.zip;unzip top-1m.csv.zip; awk -F ',' '{print $2}' top-1m.csv|head -"+str(args_.crawl_top)+" > top-"+str(args_.crawl_top)+".txt; rm top-1m.csv*"
try:
Popen(command, shell=True).wait()
except Exception:
raise Exception
def tops():
'''
Gather the required number of top domains. Download the file if it
    hasn't been downloaded. Then, prepare the URLs to be crawled.
'''
top_doms = set()
fn = 'top-{}.txt'.format(args_.crawl_top)
if not os.path.isfile(fn):
down_tops()
print('Retrieving {} top domains'.format(args_.crawl_top))
with open(fn, 'r') as f:
[top_doms.add(line.split('.')[0]) for line in f]
top_doms = ['https://{}/.json'.format(dom) for dom in top_doms]
return top_doms
def amass():
'''
    From an amass scan output file (the [-o] argument), gather the DB URLs to crawl.
'''
with open(args_.amass) as f:
dbs = ['https://{}/.json'.format(line.rstrip()) for line in f]
return dbs
def dns_dumpster():
from dnsdumpster.DNSDumpsterAPI import DNSDumpsterAPI
print('Gathering subdomains using DNSDumpster...')
results = DNSDumpsterAPI().search('firebaseio.com')
return [domain['domain'] for domain in results['dns_records']['host']]
if __name__ == '__main__':
args_ = args()
if not args_.list:
dbs = []
if args_.dnsdumpster:
dbs.extend(dns_dumpster())
if args_.path:
dbs.extend(load_file())
urls = list(set(map(clean, dbs)))
if args_.crawl_top:
urls.extend(tops())
if args_.amass:
urls.extend(amass())
print('\nLooting...')
p = Pool(args_.process)
loot = [result for result in list(p.map(worker, urls)) if result != None]
else:
urls = set()
with open(args_.list, 'r') as f:
[urls.add('https://{}/.json'.format(line.rstrip())) for line in f]
p = Pool(args_.process)
loot = [result for result in list(p.map(worker, urls)) if result != None]
print('Saving results to {}\n'.format(args_.fn))
with open(args_.fn, 'w') as f:
json.dump(loot, f)
l = {'1':0, '0':0, '-1':0, '-2':0}
Vulnerable = []
for result in loot:
l[str(result['status'])] +=1
if str(result['status']) == '1':
Vulnerable.append(result['url'])
    print('404 DBs: {}'.format(l['-1']))
    print('Secure DBs: {}'.format(l['-2']))
print('Possible vulnerable DBs: {}'.format(l['0']))
print('Vulnerable DBs: {}'.format(l['1']))
print(Vulnerable)
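# Example invocations (added for illustration; the script file name is assumed):
#   python3 firebase_scanner.py -l dbs.txt -p 4 --just-v -o loot.json
#   python3 firebase_scanner.py -c 100 --dnsdumpster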
|
the-stack_106_16522
|
from mle_hyperopt import hyperopt
def test_decorator():
@hyperopt(
strategy_type="Grid",
num_search_iters=25,
real={
"x": {"begin": 0.0, "end": 0.5, "bins": 5},
"y": {"begin": 0, "end": 0.5, "bins": 5},
},
)
def circle(config):
distance = abs((config["x"] ** 2 + config["y"] ** 2))
return distance
strategy = circle()
assert len(strategy) == 25
|
the-stack_106_16523
|
#!/usr/bin/env python3
from os.path import basename, dirname, exists, join, relpath
import glob, shutil, sys
# Determine the root directory for the source build and the Installed Build
sourceRoot = sys.argv[1]
installedRoot = join(sourceRoot, 'LocalBuilds', 'Engine', 'Linux')
# Locate the bundled toolchain and copy it to the Installed Build
sdkGlob = join(sourceRoot, 'Engine', 'Extras', 'ThirdPartyNotUE', 'SDKs', 'HostLinux', 'Linux_x64', '*', 'x86_64-unknown-linux-gnu')
for bundled in glob.glob(sdkGlob):
# Extract the root path for the toolchain
toolchain = dirname(bundled)
# Print progress output
print('Copying bundled toolchain "{}" to Installed Build...'.format(basename(toolchain)), file=sys.stderr)
sys.stderr.flush()
# Perform the copy
dest = join(installedRoot, relpath(toolchain, sourceRoot))
if exists(dest) == True:
print('Destination toolchain already exists: {}'.format(dest), file=sys.stderr, flush=True)
else:
shutil.copytree(toolchain, dest)
|
the-stack_106_16525
|
import discord
import string
from discord.ext import commands
from core import DCSServerBot, Plugin
class Help(Plugin):
@commands.command(name='help',
description='The help command!',
usage='<plugin>')
async def help(self, ctx, plugin='all'):
help_embed = discord.Embed(color=discord.Color.blue())
if plugin == 'all':
help_embed.title = 'DCSServerBot Plugins'
for p in self.bot.plugins:
if p.lower() != 'help':
help_embed.add_field(name='**' + string.capwords(p) + '**',
value=f'```{ctx.prefix}help {p.lower()}```', inline=True)
else:
help_embed.title = f'{string.capwords(plugin)} Commands'
if plugin in self.bot.plugins:
cmds = ''
descriptions = ''
# Get a list of all commands for the specified plugin
for cog in self.bot.cogs.values():
if f'.{plugin}.' in type(cog).__module__:
commands_list = self.bot.get_cog(type(cog).__name__).get_commands()
for command in commands_list:
if command.hidden is False:
cmds += f'{ctx.prefix}{command.name}'
# Also add aliases, if there are any
if len(command.aliases) > 0:
cmds += f' / {" / ".join(command.aliases)}'
if command.usage is not None:
cmds += ' ' + command.usage
cmds += '\n'
descriptions += f'{command.description}\n'
if len(cmds) == 0:
cmds = 'No commands.'
if len(descriptions) == 0:
descriptions = '_ _'
help_embed.add_field(name='Command', value=cmds)
help_embed.add_field(name='Description', value=descriptions)
else:
# Ignore unknown command, as it might have been for other bots
return
await ctx.send(embed=help_embed)
def setup(bot: DCSServerBot):
# help is only available on the master
if bot.config.getboolean('BOT', 'MASTER') is True:
bot.add_cog(Help('help', bot))
|
the-stack_106_16526
|
import os
from os.path import join as pjoin
import collections
import json
import torch
import numpy as np
import scipy.misc as m
import scipy.io as io
import matplotlib.pyplot as plt
import glob
from PIL import Image
from tqdm import tqdm
from torch.utils import data
from torchvision import transforms
SBD_PATH = '/mnt/DATA/VOC/benchmark_RELEASE'
class pascalVOCLoader(data.Dataset):
"""Data loader for the Pascal VOC semantic segmentation dataset.
Annotations from both the original VOC data (which consist of RGB images
in which colours map to specific classes) and the SBD (Berkely) dataset
(where annotations are stored as .mat files) are converted into a common
`label_mask` format. Under this format, each mask is an (M,N) array of
integer values from 0 to 21, where 0 represents the background class.
The label masks are stored in a new folder, called `pre_encoded`, which
is added as a subdirectory of the `SegmentationClass` folder in the
original Pascal VOC data layout.
A total of five data splits are provided for working with the VOC data:
train: The original VOC 2012 training data - 1464 images
val: The original VOC 2012 validation data - 1449 images
trainval: The combination of `train` and `val` - 2913 images
train_aug: The unique images present in both the train split and
training images from SBD: - 8829 images (the unique members
of the result of combining lists of length 1464 and 8498)
train_aug_val: The original VOC 2012 validation data minus the images
present in `train_aug` (This is done with the same logic as
the validation set used in FCN PAMI paper, but with VOC 2012
rather than VOC 2011) - 904 images
"""
def __init__(
self,
root,
split="train_aug",
is_transform=False,
img_size=512,
augmentations=None,
img_norm=True,
test_mode=False,
):
self.root = root
self.sbd_path = SBD_PATH
self.split = split
self.is_transform = is_transform
self.augmentations = augmentations
self.img_norm = img_norm
self.test_mode = test_mode
self.n_classes = 21
self.mean = np.array([104.00699, 116.66877, 122.67892])
self.files = collections.defaultdict(list)
self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
if not self.test_mode:
for split in ["train", "val", "trainval"]:
path = pjoin(self.root, "ImageSets/Segmentation", split + ".txt")
file_list = tuple(open(path, "r"))
file_list = [id_.rstrip() for id_ in file_list]
self.files[split] = file_list
self.setup_annotations()
self.tf = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
def __len__(self):
return len(self.files[self.split])
def __getitem__(self, index):
im_name = self.files[self.split][index]
im_path = pjoin(self.root, "JPEGImages", im_name + ".jpg")
lbl_path = pjoin(self.root, "SegmentationClass/pre_encoded", im_name + ".png")
im = Image.open(im_path)
lbl = Image.open(lbl_path)
if self.augmentations is not None:
im, lbl = self.augmentations(im, lbl)
if self.is_transform:
im, lbl = self.transform(im, lbl)
return im, lbl
def transform(self, img, lbl):
if self.img_size == ("same", "same"):
pass
else:
img = img.resize((self.img_size[0], self.img_size[1])) # uint8 with RGB mode
lbl = lbl.resize((self.img_size[0], self.img_size[1]))
img = self.tf(img)
lbl = torch.from_numpy(np.array(lbl)).long()
lbl[lbl == 255] = 0
return img, lbl
def get_pascal_labels(self):
"""Load the mapping that associates pascal classes with label colors
Returns:
np.ndarray with dimensions (21, 3)
"""
return np.asarray(
[
[0, 0, 0],
[128, 0, 0],
[0, 128, 0],
[128, 128, 0],
[0, 0, 128],
[128, 0, 128],
[0, 128, 128],
[128, 128, 128],
[64, 0, 0],
[192, 0, 0],
[64, 128, 0],
[192, 128, 0],
[64, 0, 128],
[192, 0, 128],
[64, 128, 128],
[192, 128, 128],
[0, 64, 0],
[128, 64, 0],
[0, 192, 0],
[128, 192, 0],
[0, 64, 128],
]
)
def encode_segmap(self, mask):
"""Encode segmentation label images as pascal classes
Args:
mask (np.ndarray): raw segmentation label image of dimension
(M, N, 3), in which the Pascal classes are encoded as colours.
Returns:
(np.ndarray): class map with dimensions (M,N), where the value at
a given location is the integer denoting the class index.
"""
mask = mask.astype(int)
label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
for ii, label in enumerate(self.get_pascal_labels()):
label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii
label_mask = label_mask.astype(int)
return label_mask
def decode_segmap(self, label_mask, plot=False):
"""Decode segmentation class labels into a color image
Args:
label_mask (np.ndarray): an (M,N) array of integer values denoting
the class label at each spatial location.
plot (bool, optional): whether to show the resulting color image
in a figure.
Returns:
(np.ndarray, optional): the resulting decoded color image.
"""
label_colours = self.get_pascal_labels()
r = label_mask.copy()
g = label_mask.copy()
b = label_mask.copy()
for ll in range(0, self.n_classes):
r[label_mask == ll] = label_colours[ll, 0]
g[label_mask == ll] = label_colours[ll, 1]
b[label_mask == ll] = label_colours[ll, 2]
rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
if plot:
plt.imshow(rgb)
plt.show()
else:
return rgb
def setup_annotations(self):
"""Sets up Berkley annotations by adding image indices to the
`train_aug` split and pre-encode all segmentation labels into the
common label_mask format (if this has not already been done). This
function also defines the `train_aug` and `train_aug_val` data splits
according to the description in the class docstring
"""
sbd_path = self.sbd_path
target_path = pjoin(self.root, "SegmentationClass/pre_encoded")
if not os.path.exists(target_path):
os.makedirs(target_path)
path = pjoin(sbd_path, "dataset/train.txt")
sbd_train_list = tuple(open(path, "r"))
sbd_train_list = [id_.rstrip() for id_ in sbd_train_list]
train_aug = self.files["train"] + sbd_train_list
# keep unique elements (stable)
train_aug = [train_aug[i] for i in sorted(np.unique(train_aug, return_index=True)[1])]
self.files["train_aug"] = train_aug
set_diff = set(self.files["val"]) - set(train_aug) # remove overlap
self.files["train_aug_val"] = list(set_diff)
pre_encoded = glob.glob(pjoin(target_path, "*.png"))
expected = np.unique(self.files["train_aug"] + self.files["val"]).size
if len(pre_encoded) != expected:
print("Pre-encoding segmentation masks...")
for ii in tqdm(sbd_train_list):
lbl_path = pjoin(sbd_path, "dataset/cls", ii + ".mat")
data = io.loadmat(lbl_path)
lbl = data["GTcls"][0]["Segmentation"][0].astype(np.int32)
lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())
m.imsave(pjoin(target_path, ii + ".png"), lbl)
for ii in tqdm(self.files["trainval"]):
fname = ii + ".png"
lbl_path = pjoin(self.root, "SegmentationClass", fname)
lbl = self.encode_segmap(m.imread(lbl_path))
lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())
m.imsave(pjoin(target_path, fname), lbl)
assert expected == 9733, "unexpected dataset sizes"
# Leave code for debugging purposes
# import ptsemseg.augmentations as aug
# if __name__ == '__main__':
# # local_path = '/home/meetshah1995/datasets/VOCdevkit/VOC2012/'
# bs = 4
# augs = aug.Compose([aug.RandomRotate(10), aug.RandomHorizontallyFlip()])
# dst = pascalVOCLoader(root=local_path, is_transform=True, augmentations=augs)
# trainloader = data.DataLoader(dst, batch_size=bs)
# for i, data in enumerate(trainloader):
# imgs, labels = data
# imgs = imgs.numpy()[:, ::-1, :, :]
# imgs = np.transpose(imgs, [0,2,3,1])
# f, axarr = plt.subplots(bs, 2)
# for j in range(bs):
# axarr[j][0].imshow(imgs[j])
# axarr[j][1].imshow(dst.decode_segmap(labels.numpy()[j]))
# plt.show()
# a = raw_input()
# if a == 'ex':
# break
# else:
# plt.close()
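# A minimal usage sketch in the same spirit as the debug snippet above
# (not part of the original file; the VOC root path is hypothetical):
# if __name__ == '__main__':
#     loader = pascalVOCLoader(root='/path/to/VOCdevkit/VOC2012', is_transform=True)
#     img, lbl = loader[0]    # img: 3xHxW float tensor, lbl: HxW LongTensor of class ids 0..20
#     loader.decode_segmap(lbl.numpy(), plot=True)    # render the mask with the Pascal palette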
|
the-stack_106_16527
|
"""Module with microscopic traffic simulation class and additional features."""
from dataclasses import dataclass
from typing import Tuple
import random
import math
from .section import Section
from .vehicle import Vehicle
from model_and_simulate.utilities.simulation import Simulation, SimulationParameters
class TrafficSimulation(Simulation):
"""Class for the actual simulation of vehicles on a road."""
density_max: float = 1 / 7.5 # vehicles per meter
velocity_max: int = 5 # maximum number of cells to move
def __init__(
self, length: float, occupation: float, dawdling_factor: float, all_vehicles_at_once: bool
):
self._section = Section(
length, TrafficSimulation.velocity_max, TrafficSimulation.density_max
)
self._dawdling_factor = dawdling_factor
self._vehicles, self._number_of_vehicles = self._init_vehicles(
all_vehicles_at_once, occupation
)
self._all_vehicles_set = self._check_if_all_vehicles_set()
@property
def dim(self) -> Tuple[Tuple[int, int], Tuple[int, int]]:
"""The dimension of the simulated area as (x_min, x_max),(y_min, y_max)."""
length = round(self._section.length)
cell_size = round(self._section.get_cell(0).size)
return (0, length), (0, cell_size)
@property
def number_of_vehicles(self) -> int:
"""The current number of vehicles placed on `self._section`."""
return self._number_of_vehicles
@property
def vehicles(self) -> list[Vehicle]:
"""The currently set vehicles."""
return self._vehicles[: self.number_of_vehicles]
@property
def section(self) -> Section:
"""Returns the section instance that is simulated."""
return self._section
def _check_if_all_vehicles_set(self):
return self._number_of_vehicles == len(self._vehicles)
def _init_vehicles(
self, all_vehicles_at_once: bool, occupation: float
) -> tuple[list[Vehicle], int]:
number_of_vehicles = int(math.floor(occupation * self._section.max_cell_number))
if all_vehicles_at_once:
vehicles = self._place_all_vehicles(number_of_vehicles)
else:
vehicles = [
Vehicle(i, self.velocity_max, self.velocity_max, self._dawdling_factor)
for i in range(number_of_vehicles)
]
self._place_first_two_vehicles(vehicles[0], vehicles[1])
number_of_vehicles = 2
return vehicles, number_of_vehicles
def _place_first_two_vehicles(self, left_vehicle: Vehicle, right_vehicle: Vehicle) -> None:
left_cell = self._section.get_cell(0)
right_cell = self._section.get_cell(round(self._section.max_cell_number / 2))
left_vehicle.place_into_cell(left_cell)
right_vehicle.place_into_cell(right_cell)
left_vehicle.successor = right_vehicle
right_vehicle.successor = left_vehicle
def _place_all_vehicles(self, number_of_vehicles: int) -> list[Vehicle]:
vehicles = []
cell_sample = random.sample(self._section.cells, number_of_vehicles)
cell_sample = sorted(cell_sample, key=lambda elem: elem.number, reverse=True)
successor = None
for cell, ident in zip(cell_sample, range(number_of_vehicles - 1, -1, -1)):
init_velocity = random.randint(0, self.velocity_max)
vehicle = Vehicle(ident, init_velocity, self.velocity_max, self._dawdling_factor)
vehicle.place_into_cell(cell)
vehicle.successor = successor
successor = vehicle
vehicles.insert(0, vehicle)
# set the car on the left as successor of the car on the right
rightmost_vehicle = vehicles[-1]
rightmost_vehicle.successor = vehicles[0]
return vehicles
def do_step(self) -> None:
"""Place another vehicle if density is not reached and update all vehicles."""
if not self._all_vehicles_set:
self._place_one_vehicle()
self._all_vehicles_set = self._check_if_all_vehicles_set()
for vehicle in self.vehicles:
vehicle.update_velocity(self._section.max_cell_number)
for vehicle in self.vehicles:
vehicle.move(self._section)
def _place_one_vehicle(self) -> None:
max_distance = 0
vehicle_predecessor = None
for vehicle in self.vehicles:
distance = vehicle.distance_to_successor(self._section.max_cell_number)
if distance > max_distance:
max_distance = distance
vehicle_predecessor = vehicle
distance_to_place = round(max_distance / 2)
if distance_to_place > 0:
vehicle_to_place = self._vehicles[self._number_of_vehicles]
self._number_of_vehicles += 1
cell = self._section.get_cell(vehicle_predecessor.position + distance_to_place)
vehicle_to_place.place_into_cell(cell)
vehicle_to_place.successor = vehicle_predecessor.successor
vehicle_predecessor.successor = vehicle_to_place
@dataclass
class TrafficParameters(SimulationParameters):
"""Class for keeping track of the simulation parameters in menus."""
    length: int = 2250  # default 2250, min 300, max 3500
    occupation: float = 0.2  # default 0.2, min 0.10, max 0.99
    dawdling_factor: float = 0.2  # default 0.2, min 0.00, max 0.99
    all_vehicles_at_once: bool = True  # default True
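# A minimal driving-loop sketch (not part of the original module; it simply
# reuses the TrafficParameters defaults above and the public API of
# TrafficSimulation):
#
#   params = TrafficParameters()
#   sim = TrafficSimulation(params.length, params.occupation,
#                           params.dawdling_factor, params.all_vehicles_at_once)
#   for _ in range(100):
#       sim.do_step()
#   print(sim.number_of_vehicles, 'vehicles on a', sim.section.length, 'm section')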
|
the-stack_106_16528
|
#!/usr/bin/env python
u"""
esa_costg_swarm_sync.py
Written by Tyler Sutterley (10/2021)
Syncs Swarm gravity field products from the ESA Swarm Science Server
https://earth.esa.int/eogateway/missions/swarm/data
https://www.esa.int/Applications/Observing_the_Earth/Swarm
CALLING SEQUENCE:
python esa_costg_swarm_sync.py
COMMAND LINE OPTIONS:
--help: list the command line options
-D X, --directory X: working data directory
-r X, --release X: Data release to sync
-t X, --timeout X: Timeout in seconds for blocking operations
-l, --log: output log of files downloaded
-L, --list: print files to be transferred, but do not execute transfer
-C, --clobber: Overwrite existing data in transfer
--checksum: compare hashes to check if overwriting existing data
-M X, --mode=X: Local permissions mode of the directories and files synced
PYTHON DEPENDENCIES:
lxml: Pythonic XML and HTML processing library using libxml2/libxslt
https://lxml.de/
https://github.com/lxml/lxml
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
UPDATE HISTORY:
Updated 10/2021: using python logging for handling verbose output
Written 09/2021
"""
from __future__ import print_function
import sys
import re
import os
import io
import json
import time
import shutil
import logging
import argparse
import posixpath
import lxml.etree
import gravity_toolkit.utilities
#-- PURPOSE: sync local Swarm files with ESA server
def esa_costg_swarm_sync(DIRECTORY, RELEASE=None, TIMEOUT=None, LOG=False,
LIST=False, CLOBBER=False, CHECKSUM=False, MODE=0o775):
#-- local directory for exact data product
local_dir = os.path.join(DIRECTORY,'Swarm',RELEASE,'GSM')
#-- check if directory exists and recursively create if not
    if not os.path.exists(local_dir):
        os.makedirs(local_dir, MODE)
#-- create log file with list of synchronized files (or print to terminal)
if LOG:
#-- output to log file
#-- format: ESA_Swarm_sync_2002-04-01.log
today = time.strftime('%Y-%m-%d',time.localtime())
LOGFILE = 'ESA_Swarm_sync_{0}.log'.format(today)
logging.basicConfig(filename=os.path.join(DIRECTORY,LOGFILE),
level=logging.INFO)
logging.info('ESA Swarm Sync Log ({0})'.format(today))
else:
#-- standard output (terminal output)
logging.basicConfig(level=logging.INFO)
#-- Swarm Science Server url
#-- using the JSON api protocols to retrieve files
#-- static site is no longer available
HOST = 'https://swarm-diss.eo.esa.int'
#-- compile xml parsers for lxml
XMLparser = lxml.etree.XMLParser()
#-- create "opener" (OpenerDirector instance)
gravity_toolkit.utilities.build_opener(None, None,
authorization_header=False, urs=HOST)
#-- All calls to urllib2.urlopen will now use handler
#-- Make sure not to include the protocol in with the URL, or
#-- HTTPPasswordMgrWithDefaultRealm will be confused.
#-- compile regular expression operator for files
swarm_data = r'(SW)_(.*?)_(EGF_SHA_2)__(.*?)_(.*?)_(.*?)(\.gfc|\.ZIP)'
R1 = re.compile(swarm_data, re.VERBOSE)
#-- create combined list of filenames and last modified times
colnames = []
collastmod = []
#-- position, maximum number of files to list, flag to check if done
pos,maxfiles,prevmax = (0,500,500)
#-- iterate to get a compiled list of files
#-- will iterate until there are no more files to add to the lists
while (maxfiles == prevmax):
#-- set previous flag to maximum
prevmax = maxfiles
#-- open connection with Swarm science server at remote directory
#-- to list maxfiles number of files at position
parameters = gravity_toolkit.utilities.urlencode({'maxfiles':prevmax,
'pos':pos,'file':posixpath.join('swarm','Level2longterm','EGF')})
url=posixpath.join(HOST,'?do=list&{0}'.format(parameters))
request = gravity_toolkit.utilities.urllib2.Request(url=url)
response = gravity_toolkit.utilities.urllib2.urlopen(request,
timeout=TIMEOUT)
table = json.loads(response.read().decode())
#-- extend lists with new files
colnames.extend([t['name'] for t in table['results']])
collastmod.extend([t['mtime'] for t in table['results']])
#-- update maximum number of files
maxfiles = len(table['results'])
#-- update position
pos += maxfiles
#-- find lines of valid files
valid_lines = [i for i,f in enumerate(colnames) if R1.match(f)]
#-- write each file to an index
fid = open(os.path.join(local_dir,'index.txt'),'w')
#-- for each data and header file
for i in valid_lines:
#-- remote and local versions of the file
parameters = gravity_toolkit.utilities.urlencode({'file':
posixpath.join('swarm','Level2longterm','EGF',colnames[i])})
remote_file = posixpath.join(HOST,
'?do=download&{0}'.format(parameters))
local_file = os.path.join(local_dir,colnames[i])
#-- check that file is not in file system unless overwriting
http_pull_file(remote_file, collastmod[i], local_file,
TIMEOUT=TIMEOUT, LIST=LIST, CLOBBER=CLOBBER,
CHECKSUM=CHECKSUM, MODE=MODE)
#-- output Swarm filenames to index
print('{0}'.format(colnames[i]), file=fid)
#-- change permissions of index file
os.chmod(os.path.join(local_dir,'index.txt'), MODE)
#-- close log file and set permissions level to MODE
if LOG:
os.chmod(os.path.join(DIRECTORY,LOGFILE), MODE)
#-- PURPOSE: pull file from a remote host checking if file exists locally
#-- and if the remote file is newer than the local file
def http_pull_file(remote_file, remote_mtime, local_file, TIMEOUT=120,
LIST=False, CLOBBER=False, CHECKSUM=False, MODE=0o775):
#-- if file exists in file system: check if remote file is newer
TEST = False
OVERWRITE = ' (clobber)'
#-- check if local version of file exists
if CHECKSUM and os.access(local_file, os.F_OK):
#-- generate checksum hash for local file
#-- open the local_file in binary read mode
local_hash = gravity_toolkit.utilities.get_hash(local_file)
#-- Create and submit request.
#-- There are a wide range of exceptions that can be thrown here
#-- including HTTPError and URLError.
req=gravity_toolkit.utilities.urllib2.Request(remote_file)
resp=gravity_toolkit.utilities.urllib2.urlopen(req,timeout=TIMEOUT)
#-- copy remote file contents to bytesIO object
remote_buffer = io.BytesIO(resp.read())
remote_buffer.seek(0)
#-- generate checksum hash for remote file
remote_hash = gravity_toolkit.utilities.get_hash(remote_buffer)
#-- compare checksums
if (local_hash != remote_hash):
TEST = True
OVERWRITE = ' (checksums: {0} {1})'.format(local_hash,remote_hash)
elif os.access(local_file, os.F_OK):
#-- check last modification time of local file
local_mtime = os.stat(local_file).st_mtime
#-- if remote file is newer: overwrite the local file
if (gravity_toolkit.utilities.even(remote_mtime) >
gravity_toolkit.utilities.even(local_mtime)):
TEST = True
OVERWRITE = ' (overwrite)'
else:
TEST = True
OVERWRITE = ' (new)'
#-- if file does not exist locally, is to be overwritten, or CLOBBER is set
if TEST or CLOBBER:
#-- Printing files transferred
logging.info('{0} --> '.format(remote_file))
logging.info('\t{0}{1}\n'.format(local_file,OVERWRITE))
#-- if executing copy command (not only printing the files)
if not LIST:
#-- chunked transfer encoding size
CHUNK = 16 * 1024
#-- copy bytes or transfer file
if CHECKSUM and os.access(local_file, os.F_OK):
#-- store bytes to file using chunked transfer encoding
remote_buffer.seek(0)
with open(local_file, 'wb') as f:
shutil.copyfileobj(remote_buffer, f, CHUNK)
else:
#-- Create and submit request.
#-- There are a range of exceptions that can be thrown here
#-- including HTTPError and URLError.
request = gravity_toolkit.utilities.urllib2.Request(remote_file)
response = gravity_toolkit.utilities.urllib2.urlopen(request,
timeout=TIMEOUT)
#-- copy remote file contents to local file
with open(local_file, 'wb') as f:
shutil.copyfileobj(response, f, CHUNK)
#-- keep remote modification time of file and local access time
os.utime(local_file, (os.stat(local_file).st_atime, remote_mtime))
os.chmod(local_file, MODE)
#-- Main program that calls esa_costg_swarm_sync()
def main():
#-- Read the system arguments listed after the program
parser = argparse.ArgumentParser(
description="""Syncs Swarm gravity field products from the
ESA Swarm Science Server
"""
)
#-- command line parameters
#-- working data directory
parser.add_argument('--directory','-D',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
default=os.getcwd(),
help='Working data directory')
#-- data release
parser.add_argument('--release','-r',
type=str, default='RL01', choices=['RL01'],
help='Data release to sync')
#-- connection timeout
parser.add_argument('--timeout','-t',
type=int, default=360,
help='Timeout in seconds for blocking operations')
#-- Output log file in form
#-- ESA_Swarm_sync_2002-04-01.log
parser.add_argument('--log','-l',
default=False, action='store_true',
help='Output log file')
#-- sync options
parser.add_argument('--list','-L',
default=False, action='store_true',
help='Only print files that could be transferred')
parser.add_argument('--clobber','-C',
default=False, action='store_true',
help='Overwrite existing data in transfer')
parser.add_argument('--checksum',
default=False, action='store_true',
help='Compare hashes to check for overwriting existing data')
#-- permissions mode of the directories and files synced (number in octal)
parser.add_argument('--mode','-M',
type=lambda x: int(x,base=8), default=0o775,
help='Permission mode of directories and files synced')
args = parser.parse_args()
#-- check internet connection before attempting to run program
HOST = 'https://swarm-diss.eo.esa.int'
if gravity_toolkit.utilities.check_connection(HOST):
esa_costg_swarm_sync(args.directory, RELEASE=args.release,
TIMEOUT=args.timeout, LOG=args.log, LIST=args.list,
CLOBBER=args.clobber, CHECKSUM=args.checksum, MODE=args.mode)
#-- run main program
if __name__ == '__main__':
main()
|
the-stack_106_16529
|
#!/usr/bin/env python3
import sys
import os
# load parent path of KicadModTree
sys.path.append(os.path.join(sys.path[0], "..", ".."))
from KicadModTree import *
def plcc4(args):
footprint_name = args["name"]
pkgWidth = args["pkg_width"]
pkgHeight = args["pkg_height"]
padXSpacing = args["pad_x_spacing"]
padYSpacing = args["pad_y_spacing"]
padWidth = args["pad_width"]
padHeight = args["pad_height"]
pads_clockwise = args["pads_clockwise"]
desc = str(pkgWidth) + "mm x " + str(pkgHeight) + "mm PLCC4 LED, "
f = Footprint(footprint_name)
f.setDescription(desc + args["datasheet"])
f.setTags("LED Cree PLCC-4")
f.setAttribute("smd")
f.append(Model(filename="${KICAD6_3DMODEL_DIR}/LEDs.3dshapes/" + footprint_name + ".wrl",
at=[0.0, 0.0, 0.0],
scale=[1.0, 1.0, 1.0],
rotate=[0.0, 0.0, 0.0]))
p = [padWidth, padHeight]
r = pkgHeight * 0.4
s = [1.0, 1.0]
sFabRef = [0.5, 0.5]
t1 = 0.075
t2 = 0.15
wCrtYd = 0.05
wFab = 0.1
wSilkS = 0.12
crtYd = 0.25
silkClearance = 0.2
xCenter = 0.0
xPadRight = padXSpacing / 2
xFabRight = pkgWidth / 2
xSilkRight = xPadRight + padWidth / 2 + silkClearance
xRightCrtYd = xSilkRight + crtYd
xLeftCrtYd = -xRightCrtYd
xSilkLeft = -xSilkRight
xPadLeft = -xPadRight
xFabLeft = -xFabRight
xChamfer = xFabLeft + 1.0
yCenter = 0.0
yPadBottom = padYSpacing / 2
yFabBottom = pkgHeight / 2
ySilkBottom = max(yFabBottom + 0.1,
yPadBottom + padHeight / 2 + silkClearance)
yBottomCrtYd = ySilkBottom + crtYd
yTopCrtYd = -yBottomCrtYd
ySilkTop = -ySilkBottom
yFabTop = -yFabBottom
yPadTop = -yPadBottom
yChamfer = yFabTop + 1
yValue = yFabBottom + 1.25
yRef = yFabTop - 1.25
f.append(Text(type="reference", text="REF**", at=[xCenter, yRef],
layer="F.SilkS", size=s, thickness=t2))
f.append(Text(type="value", text=footprint_name, at=[xCenter, yValue],
layer="F.Fab", size=s, thickness=t2))
f.append(Text(type="user", text="%R", at=[xCenter, yCenter],
layer="F.Fab", size=sFabRef, thickness=t1))
f.append(RectLine(start=[xLeftCrtYd, yTopCrtYd],
end=[xRightCrtYd, yBottomCrtYd],
layer="F.CrtYd", width=wCrtYd))
f.append(Line(start=[xChamfer, yFabTop],
end=[xFabLeft, yChamfer],
layer="F.Fab", width=wFab))
f.append(RectLine(start=[xFabLeft, yFabTop],
end=[xFabRight, yFabBottom],
layer="F.Fab", width=wFab))
f.append(Circle(center=[xCenter, yCenter], radius=r,
layer="F.Fab", width=wFab))
f.append(PolygoneLine(polygone=[[xSilkLeft, yPadTop],
[xSilkLeft, ySilkTop],
[xSilkRight, ySilkTop]],
layer="F.SilkS", width=wSilkS))
f.append(Line(start=[xSilkLeft, ySilkBottom],
end=[xSilkRight, ySilkBottom],
layer="F.SilkS", width=wSilkS))
if pads_clockwise:
pads = ["1", "2", "3", "4"]
else:
pads = ["1", "4", "3", "2"]
f.append(Pad(number=pads[0], type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT,
at=[xPadLeft, yPadTop], size=p, layers=Pad.LAYERS_SMT))
f.append(Pad(number=pads[1], type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT,
at=[xPadRight, yPadTop], size=p, layers=Pad.LAYERS_SMT))
f.append(Pad(number=pads[2], type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT,
at=[xPadRight, yPadBottom], size=p, layers=Pad.LAYERS_SMT))
f.append(Pad(number=pads[3], type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT,
at=[xPadLeft, yPadBottom], size=p, layers=Pad.LAYERS_SMT))
file_handler = KicadFileHandler(f)
file_handler.writeFile(footprint_name + ".kicad_mod")
if __name__ == '__main__':
parser = ModArgparser(plcc4)
# the root node of .yml files is parsed as name
parser.add_parameter("name", type=str, required=True)
parser.add_parameter("datasheet", type=str, required=True)
parser.add_parameter("pkg_width", type=float, required=False, default=2.0)
parser.add_parameter("pkg_height", type=float, required=False, default=2.0)
parser.add_parameter("pad_x_spacing", type=float, required=False, default=1.5)
parser.add_parameter("pad_y_spacing", type=float, required=False, default=1.1)
parser.add_parameter("pad_width", type=float, required=False, default=1.0)
parser.add_parameter("pad_height", type=float, required=False, default=0.8)
parser.add_parameter("pads_clockwise", type=bool, required=False, default=True)
# now run our script which handles the whole part of parsing the files
parser.run()
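# A direct-call sketch that bypasses the .yml parser (parameter values are
# illustrative only, mirroring the defaults registered above; the name and
# datasheet URL are placeholders):
#
#   plcc4({
#       "name": "LED_PLCC4_2.0x2.0mm_Example",
#       "datasheet": "https://example.com/datasheet.pdf",
#       "pkg_width": 2.0, "pkg_height": 2.0,
#       "pad_x_spacing": 1.5, "pad_y_spacing": 1.1,
#       "pad_width": 1.0, "pad_height": 0.8,
#       "pads_clockwise": True,
#   })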
|
the-stack_106_16531
|
import argparse
import glob
import logging
import os
import torch
from quobert.model import BertForQuotationAttribution, evaluate
from quobert.utils.data import ConcatParquetDataset, ParquetDataset
logger = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
default=None,
type=str,
required=True,
help="Path to pre-trained model",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model results will be written.",
)
parser.add_argument(
"--test_dir",
default=None,
type=str,
required=True,
help="The input test directory. Should contain (.gz.parquet) files",
)
parser.add_argument(
"--output_speakers_only",
action="store_true",
help="If set, only output the top1 speakers instead of the probabilities associated",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=128,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(f"Started loading the dataset from {args.test_dir}")
files = glob.glob(os.path.join(args.test_dir, "**.gz.parquet"))
datasets = [ParquetDataset(f) for f in files]
concat_dataset = ConcatParquetDataset(datasets)
model = BertForQuotationAttribution.from_pretrained(args.model_dir)
model.to(args.device)
    args.output_file = os.path.join(args.output_dir, "results.csv")
evaluate(args, model, concat_dataset, output_proba=not args.output_speakers_only)
# logger.info(f"EM: {result * 100:.2f}%")
|
the-stack_106_16532
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from data_source import DataSource
from future import Future
from operator import itemgetter
import docs_server_utils as utils
class APIListDataSource(DataSource):
""" This class creates a list of chrome.* APIs and chrome.experimental.* APIs
for extensions and apps that are used in the api_index.html,
experimental.html, and private_apis.html pages.
An API is considered listable if it is listed in _api_features.json,
it has a corresponding HTML file in the public template path, and one of
the following conditions is met:
- It has no "dependencies" or "extension_types" properties in _api_features
- It has an "extension_types" property in _api_features with either/both
"extension"/"platform_app" values present.
- It has a dependency in _{api,manifest,permission}_features with an
"extension_types" property where either/both "extension"/"platform_app"
values are present.
"""
def __init__(self, server_instance, _):
self._features_bundle = server_instance.features_bundle
self._object_store = server_instance.object_store_creator.Create(
APIListDataSource)
self._api_models = server_instance.api_models
self._api_categorizer = server_instance.api_categorizer
self._availability_finder = server_instance.availability_finder
def _GenerateAPIDict(self):
def get_channel_info(api_name):
return self._availability_finder.GetApiAvailability(api_name)
def get_api_platform(api_name):
feature = self._features_bundle.GetAPIFeatures().Get()[api_name]
return feature['platforms']
def make_dict_for_platform(platform):
platform_dict = {
'chrome': {'stable': [], 'beta': [], 'dev': [], 'trunk': []},
}
private_apis = []
experimental_apis = []
all_apis = []
for api_name, api_model in self._api_models.IterModels():
if not self._api_categorizer.IsDocumented(platform, api_name):
continue
api = {
'name': api_name,
'description': api_model.description,
'platforms': get_api_platform(api_name),
}
category = self._api_categorizer.GetCategory(platform, api_name)
if category == 'chrome':
channel_info = get_channel_info(api_name)
channel = channel_info.channel
if channel == 'stable':
version = channel_info.version
api['version'] = version
platform_dict[category][channel].append(api)
all_apis.append(api)
elif category == 'experimental':
experimental_apis.append(api)
all_apis.append(api)
elif category == 'private':
private_apis.append(api)
for channel, apis_by_channel in platform_dict['chrome'].iteritems():
apis_by_channel.sort(key=itemgetter('name'))
utils.MarkLast(apis_by_channel)
platform_dict['chrome'][channel] = apis_by_channel
for key, apis in (('all', all_apis),
('private', private_apis),
('experimental', experimental_apis)):
apis.sort(key=itemgetter('name'))
utils.MarkLast(apis)
platform_dict[key] = apis
return platform_dict
return {
'apps': make_dict_for_platform('apps'),
'extensions': make_dict_for_platform('extensions'),
}
def _GetCachedAPIData(self):
data_future = self._object_store.Get('api_data')
def resolve():
data = data_future.Get()
if data is None:
data = self._GenerateAPIDict()
self._object_store.Set('api_data', data)
return data
return Future(callback=resolve)
def get(self, key):
return self._GetCachedAPIData().Get().get(key)
def Cron(self):
return self._GetCachedAPIData()
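# Sketch of the nested dict produced by _GenerateAPIDict() above (keys taken
# from the code; each leaf API entry is a dict with 'name', 'description' and
# 'platforms', plus 'version' for stable chrome APIs):
#
#   {'apps':       {'chrome': {'stable': [...], 'beta': [...], 'dev': [...], 'trunk': [...]},
#                   'all': [...], 'private': [...], 'experimental': [...]},
#    'extensions': {...same shape...}}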
|
the-stack_106_16535
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8320)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 5520)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
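# Worked examples of the conversions above (addresses are illustrative only):
#   parse_spec('1.2.3.4', 8320)            -> (pchIPv4 + bytes [1, 2, 3, 4], 8320)
#   parse_spec('[2001:db8::1]:5520', 8320) -> (16-byte expanded IPv6 address, 5520)
#   parse_spec('0x04030201', 8320)         -> same host bytes as '1.2.3.4' (little-endian IPv4)
# process_nodes() then renders each tuple as one '{{0x..,...}, port}' SeedSpec6 entry.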
|
the-stack_106_16538
|
#!/usr/bin/env python3
import pytest
from runfile.exceptions import RunfileFormatError
from runfile.target import Target
@pytest.mark.parametrize("name,valid", [
['foo', True],
['foo bar', False],
['foo:bar', True],
['FooBar', True],
['foo-bar', False],
[':foo', False],
['bar_', False],
['hi', True],
['a', True]
])
def test_validate(name, valid):
if valid:
Target(name=name)
else:
with pytest.raises(RunfileFormatError):
Target(name=name)
|
the-stack_106_16543
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Isaac Saito, Ze'ev Klapow, Austin Hendrix
import os
import rospkg
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from python_qt_binding import loadUi
from python_qt_binding.QtCore import QTimer, Signal, Qt, Slot
from python_qt_binding.QtGui import QPalette
from python_qt_binding.QtWidgets import QWidget
import rospy
from .inspector_window import InspectorWindow
from .status_item import StatusItem
from .timeline_pane import TimelinePane
from .timeline import Timeline
import rqt_robot_monitor.util_robot_monitor as util
class RobotMonitorWidget(QWidget):
"""
    NOTE: The RobotMonitorWidget.shutdown method needs to be called
    when an instance of this class terminates.
    RobotMonitorWidget itself doesn't store previous diagnostic states;
    it delegates that responsibility to the Timeline class.
"""
_TREE_ALL = 1
_TREE_WARN = 2
_TREE_ERR = 3
message_updated = Signal(DiagnosticArray)
def __init__(self, context, topic=None):
"""
:param context: plugin context hook to enable adding widgets as a
ROS_GUI pane, 'PluginContext'
:param topic: Diagnostic topic to subscribe to 'str'
"""
super(RobotMonitorWidget, self).__init__()
rp = rospkg.RosPack()
ui_file = os.path.join(rp.get_path('rqt_robot_supervisor_monitor'),'src', 'resource',
'robotmonitor_mainwidget.ui')
loadUi(ui_file, self)
obj_name = 'Robot Monitor'
self.setObjectName(obj_name)
self.setWindowTitle(obj_name)
self.message_updated.connect(self.message_cb)
# if we're given a topic, create a timeline. otherwise, don't
# this can be used later when writing an rqt_bag plugin
if topic:
# create timeline data structure
self._timeline = Timeline(topic, DiagnosticArray)
self._timeline.message_updated.connect(self.message_updated)
# create timeline pane widget
self.timeline_pane = TimelinePane(self)
self.timeline_pane.set_timeline(self._timeline)
self.vlayout_top.addWidget(self.timeline_pane)
self.timeline_pane.show()
else:
self._timeline = None
self.timeline_pane = None
self._inspectors = {}
# keep a copy of the current message for opening new inspectors
self._current_msg = None
self.tree_all_devices.itemDoubleClicked.connect(self._tree_clicked)
self.warn_flattree.itemDoubleClicked.connect(self._tree_clicked)
self.err_flattree.itemDoubleClicked.connect(self._tree_clicked)
# TODO: resize on itemCollapsed and itemExpanded
self._is_stale = False
self._timer = QTimer()
self._timer.timeout.connect(self._update_message_state)
self._timer.start(1000)
palette = self.tree_all_devices.palette()
self._original_base_color = palette.base().color()
self._original_alt_base_color = palette.alternateBase().color()
self._tree = StatusItem(self.tree_all_devices.invisibleRootItem())
self._warn_tree = StatusItem(self.warn_flattree.invisibleRootItem())
self._err_tree = StatusItem(self.err_flattree.invisibleRootItem())
@Slot(DiagnosticArray)
def message_cb(self, msg):
""" DiagnosticArray message callback """
self._current_msg = msg
# Walk the status array and update the tree
for status in msg.status:
# Compute path and walk to appropriate subtree
path = status.name.split('/')
if path[0] == '':
path = path[1:]
tmp_tree = self._tree
for p in path:
tmp_tree = tmp_tree[p]
tmp_tree.update(status, util.get_resource_name(status.name))
# Check for warnings
if status.level == DiagnosticStatus.WARN:
name = status.name
self._warn_tree[name].update(status, status.name)
# Check for errors
if (status.level == DiagnosticStatus.ERROR or
status.level == DiagnosticStatus.STALE):
name = status.name
self._err_tree[name].update(status, status.name)
# For any items in the tree that were not updated, remove them
self._tree.prune()
self._warn_tree.prune()
self._err_tree.prune()
# TODO(ahendrix): implement
# Insight: for any item that is not OK, it only provides additional
# information if all of it's children are OK
#
# otherwise, it's just an aggregation of its children
# and doesn't provide any additional value when added to
# the warning and error flat trees
self.tree_all_devices.resizeColumnToContents(0)
self.warn_flattree.resizeColumnToContents(0)
self.err_flattree.resizeColumnToContents(0)
def resizeEvent(self, evt):
"""Overridden from QWidget"""
rospy.logdebug('RobotMonitorWidget resizeEvent')
if self.timeline_pane:
self.timeline_pane.redraw()
@Slot(str)
def _inspector_closed(self, name):
""" Called when an inspector window is closed """
if name in self._inspectors:
del self._inspectors[name]
def _tree_clicked(self, item, column):
"""
Slot to QTreeWidget.itemDoubleClicked
:type item: QTreeWidgetItem
:type column: int
"""
rospy.logdebug('RobotMonitorWidget _tree_clicked col=%d', column)
if item.name in self._inspectors:
self._inspectors[item.name].activateWindow()
else:
self._inspectors[item.name] = InspectorWindow(self, item.name,
self._current_msg, self._timeline)
self._inspectors[item.name].closed.connect(self._inspector_closed)
self.message_updated.connect(self._inspectors[item.name].message_updated)
def _update_message_state(self):
""" Update the display if it's stale """
if self._timeline is not None:
if self._timeline.has_messages:
previous_stale_state = self._is_stale
self._is_stale = self._timeline.is_stale
time_diff = int(self._timeline.data_age())
msg_template = "Last message received %s %s ago"
if time_diff == 1:
msg = msg_template % (time_diff, "second")
else:
msg = msg_template % (time_diff, "seconds")
self.timeline_pane._msg_label.setText(msg)
if previous_stale_state != self._is_stale:
self._update_background_color()
else:
# no messages received yet
self.timeline_pane._msg_label.setText("No messages received")
def _update_background_color(self):
""" Update the background color based on staleness """
p = self.tree_all_devices.palette()
if self._is_stale:
p.setColor(QPalette.Base, Qt.darkGray)
p.setColor(QPalette.AlternateBase, Qt.lightGray)
else:
p.setColor(QPalette.Base, self._original_base_color)
p.setColor(QPalette.AlternateBase, self._original_alt_base_color)
self.tree_all_devices.setPalette(p)
self.warn_flattree.setPalette(p)
self.err_flattree.setPalette(p)
def shutdown(self):
"""
This needs to be called whenever this class terminates.
This closes all the instances on all trees.
Also unregisters ROS' subscriber, stops timer.
"""
rospy.logdebug('RobotMonitorWidget in shutdown')
names = self._inspectors.keys()
for name in names:
self._inspectors[name].close()
if self._timeline:
self._timeline.shutdown()
self._timer.stop()
del self._timer
def save_settings(self, plugin_settings, instance_settings):
instance_settings.set_value('splitter', self.splitter.saveState())
# TODO(ahendrix): persist the device paths, positions and sizes of any
# inspector windows
def restore_settings(self, plugin_settings, instance_settings):
if instance_settings.contains('splitter'):
self.splitter.restoreState(instance_settings.value('splitter'))
else:
self.splitter.setSizes([100, 100, 200])
# TODO(ahendrix): restore inspector windows
|
the-stack_106_16547
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Hicham Belhseine"
__email__ = "[email protected]"
import logging
import time
import multiprocessing
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.log import LogConfig
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.crazyflie.syncLogger import SyncLogger
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.gridspec import GridSpec
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import style
from matplotlib import use
import numpy as np
import cftune
import cflog
# design requirements
overshoot_tgt = .1 # 10% overshoot
rise_time_tgt = 1 # 1s rise time (5% -> 95%)
settle_time_tgt = 3 # 3s settling time (5%)
style.use('seaborn-whitegrid')
URI = 'radio://0/80/2M/E7E7E7E711'
alt_takeoff = .5 # target takeoff altitude [m]
alt_target = 1 # setpoint altitude [m]
# x y z YAW [m, m, m, deg]
setpoint_pos = np.array(
[.5, .5, alt_target, 0]
)
#
# Crazyflie control methods
#
def takeoff():
for i in range(60):
cf.commander.send_position_setpoint(.5,
.5,
alt_takeoff,
0)
time.sleep(.1)
def land():
for i in range(10):
cf.commander.send_position_setpoint(.5,
.5,
0.1,
0.0)
time.sleep(0.1)
def alt_setpoint(cf, t):
alt_sp = setpoint_pos + np.array([0, 0, alt_takeoff, 0])
time_start = time.time()
while time.time() < (time_start + t):
cf.commander.send_position_setpoint(alt_sp[0],
alt_sp[1],
alt_sp[2],
alt_sp[3])
time.sleep(0.1)
time.sleep(0.1)
def wait_for_position_estimator(scf):
print('Waiting for estimator to find position...')
log_config = LogConfig(name='Kalman Variance', period_in_ms=500)
log_config.add_variable('kalman.varPX', 'float')
log_config.add_variable('kalman.varPY', 'float')
log_config.add_variable('kalman.varPZ', 'float')
var_y_history = [1000] * 10
var_x_history = [1000] * 10
var_z_history = [1000] * 10
threshold = 0.001
with SyncLogger(scf, log_config) as logger:
for log_entry in logger:
data = log_entry[1]
var_x_history.append(data['kalman.varPX'])
var_x_history.pop(0)
var_y_history.append(data['kalman.varPY'])
var_y_history.pop(0)
var_z_history.append(data['kalman.varPZ'])
var_z_history.pop(0)
min_x = min(var_x_history)
max_x = max(var_x_history)
min_y = min(var_y_history)
max_y = max(var_y_history)
min_z = min(var_z_history)
max_z = max(var_z_history)
if (max_x - min_x) < threshold and (
max_y - min_y) < threshold and (
max_z - min_z) < threshold:
break
print("Estimator reset.")
def reset_estimator(scf):
cf = scf.cf
cf.param.set_value('kalman.resetEstimation', '1')
time.sleep(0.1)
cf.param.set_value('kalman.resetEstimation', '0')
wait_for_position_estimator(cf)
#
# Plotting methods
#
def start_plots(x, y, z, pos_ts, tX, tY, tZ, tar_ts):
position = [x, y, z, pos_ts]
setpoint = [tX, tY, tZ, tar_ts]
fig = plt.figure()
fig.set_size_inches(15, 8)
gs = GridSpec(2, 4)
ax_x = fig.add_subplot(gs[0, 0])
ax_y = fig.add_subplot(gs[0, 1])
ax_z = fig.add_subplot(gs[1, :-2])
ax_3d = fig.add_subplot(gs[0:, 2:], projection='3d')
ani = FuncAnimation(fig, plot, interval=100,
fargs=(ax_x, ax_y, ax_z, ax_3d,
position, setpoint))
plt.show()
def plot(i, ax_x, ax_y, ax_z, ax_3d, position, setpoint):
x, y, z, pos_ts = position
tX, tY, tZ, tar_ts = setpoint
ax_x.clear()
ax_y.clear()
ax_z.clear()
ax_3d.clear()
x_line, = ax_x.plot(np.subtract(pos_ts, pos_ts[0]) / 1000, x)
targetX_line, = ax_x.plot(np.subtract(tar_ts, tar_ts[0]) / 1000, tX)
ax_x.set_title("X Position Time History")
ax_x.set_xlabel("Time Elapsed (seconds)")
ax_x.set_ylabel("Local X Position (m)")
ax_x.legend((x_line, targetX_line),
("Local X Position", "Local X Setpoint"))
y_line, = ax_y.plot(np.subtract(pos_ts, pos_ts[0]) / 1000, y)
targetY_line, = ax_y.plot(np.subtract(tar_ts, tar_ts[0]) / 1000, tY)
ax_y.set_title("Y Position Time History")
ax_y.set_xlabel("Time Elapsed (seconds)")
ax_y.set_ylabel("Local Y Position (m)")
ax_y.legend((y_line, targetY_line),
("Local Y Position", "Local Y Setpoint"))
z_line, = ax_z.plot(np.subtract(pos_ts, pos_ts[0]) / 1000, z)
targetZ_line, = ax_z.plot(np.subtract(tar_ts, tar_ts[0]) / 1000, tZ)
ax_z.set_title("Z Position Time History")
ax_z.set_xlabel("Time Elapsed (seconds)")
ax_z.set_ylabel("Local Z Position (m)")
ax_z.legend((z_line, targetZ_line),
("Local Z Position", "Local Z Setpoint"))
ax_3d.plot(x[-50:], y[-50:], z[-50:], label="Quadrotor Position")
ax_3d.set_xlim3d(-3, 3)
ax_3d.set_ylim3d(-3, 3)
ax_3d.set_zlim3d(0, 3)
ax_3d.legend(['x', 'y', 'z'])
def plot_step_response(tuner):
# Get trial PID results and params
timestamp, response, setpoint = tuner.get_response()
Kp, Ki, Kd = tuner.get_alt_pid()
# Get step response info
response = np.array(response) - alt_takeoff
setpoint = np.array(setpoint) - alt_takeoff
rise_time, e_ss, p_over, settle_time = tuner.step_info(timestamp,
response,
0,
alt_target) # noqa
fig = plt.figure()
fig.set_size_inches(14, 10.5)
ax = plt.gca()
ax.plot(np.subtract(timestamp, timestamp[0]) / 1000, response,
label="Alt Response")
ax.plot(np.subtract(timestamp, timestamp[0]) / 1000, setpoint,
label="Alt Setpoint")
ax.set_title("Altitude Step Response, Kp={:1.2f} Ki={:1.2f} Kd={:1.2f}".format(Kp, Ki, Kd)) # noqa
ax.set_xlabel("Time Elapsed (seconds)")
ax.set_ylabel("Altitude (m)")
plt.suptitle("Rise Time: {:2.2f} s\nError SS: {:2.2f} m\nPercent Overshoot: {:1.2f}\nSettling Time: {:2.2f} s".format(rise_time, e_ss, p_over*100, settle_time)) # noqa
ax.legend()
fig.savefig("alt_ctl_step_" + time.strftime("%Y%m%d-%H%M%S"))
print("Close the plot window to continue")
plt.show()
def save_motor_data(t_strt, t_range, m1, m2, m3, m4, timestamps):
# get motor data only during step response recording
t_end = t_strt + (t_range * 1000)
idx_strt = (np.abs(np.array(timestamps) - t_strt)).argmin()
idx_end = (np.abs(np.array(timestamps) - t_end)).argmin()
timestamps = timestamps[idx_strt:idx_end]
m1 = m1[idx_strt:idx_end]
m2 = m2[idx_strt:idx_end]
m3 = m3[idx_strt:idx_end]
m4 = m4[idx_strt:idx_end]
m_all = np.add(m1, np.add(m2, np.add(m3, m4)))
fig = plt.figure()
fig.set_size_inches(11, 8)
ax = plt.gca()
ax.plot(np.subtract(timestamps, timestamps[0]) / 1000, m1,
label='Motor 1 Output')
ax.plot(np.subtract(timestamps, timestamps[0]) / 1000, m2,
label='Motor 2 Output')
ax.plot(np.subtract(timestamps, timestamps[0]) / 1000, m3,
label='Motor 3 Output')
ax.plot(np.subtract(timestamps, timestamps[0]) / 1000, m4,
label='Motor 4 Output')
ax.plot(np.subtract(timestamps, timestamps[0]) / 1000, m_all,
label='Combined Motor Ouput')
ax.set_title("Motor Response from Altitude Step Input")
ax.set_xlabel("Time Elapsed (Seconds)")
ax.set_ylabel("Motor Output")
ax.legend()
fig.savefig("motor_output_" + time.strftime("%Y%m%d-%H%M%S"))
if __name__ == "__main__":
# Initialize low-level drivers
cflib.crtp.init_drivers(enable_debug_driver=False)
with SyncCrazyflie(URI, cf=Crazyflie(rw_cache='./cache')) as scf:
# cf position and setpoint logger
log = cflog.CFLog(scf)
# PID analyzer and parameter manager
pidtune = cftune.PositionTuner(scf)
# Dirty implementation of cf data piping
x, y, z, pos_ts = log.get_position()
tX, tY, tZ, tar_ts = log.get_target_position()
m1, m2, m3, m4, motor_ts = log.get_motor_output()
time.sleep(1)
p_plot = multiprocessing.Process(target=start_plots,
args=(x, y, z, pos_ts,
tX, tY, tZ, tar_ts))
p_plot.daemon = True
p_plot.start()
cf = scf.cf
while True:
user_input = -1
print("Select an item:")
print("01) Takeoff and land while recording data.")
print("02) Set new PID parameters.")
print("10) Exit program")
try:
user_input = int(input("Item select: "))
except ValueError:
print("Error, Unknown Input")
continue
if user_input == 1:
Kp, Ki, Kd = pidtune.get_alt_pid()
print("Current z-position PID controller gains:")
print("\tKp: {:2.2f}".format(Kp))
print("\tKi: {:2.2f}".format(Ki))
print("\tKd: {:2.2f}".format(Kd))
reset_estimator(scf)
print("Taking off.")
takeoff()
pidtune.record_response()
print("Ascending to setpoint altitude.")
                alt_setpoint(cf, 20)  # hold the altitude setpoint for 20 seconds
print("Landing")
land()
# Flight data
timestamps, z, targetZ = pidtune.get_response()
rise_time, e_ss, p_over, settle_time = pidtune.step_info(
timestamps, np.array(z) - alt_takeoff, # noqa
0,
alt_target # noqa
)
print("Flight results:")
print("\tRise Time: {:2.2f} s, [{}]".format(rise_time,
'Success' if rise_time < rise_time_tgt else 'Failed'))
print("\tError SS: {:2.2f} m".format(e_ss))
print("\tOvershoot: {:2.2f} %, [{}]".format(p_over * 100,
'Success' if p_over < overshoot_tgt else 'Failed'))
print("\tSettling Time: {:2.2f} s, [{}]".format(settle_time,
'Success' if settle_time < settle_time_tgt else 'Failed')) # noqa
time.sleep(.5)
save_motor_data(timestamps[1], 15, m1, m2, m3, m4, motor_ts)
plot_step_response(pidtune)
elif user_input == 2:
# Updating cf posCtlZ PID gains
print("Enter new PID params")
Kp_new = float(input("New Kp: "))
Ki_new = float(input("New Ki: "))
Kd_new = float(input("New Kd: "))
pidtune.set_alt_pid(Kp_new, Ki_new, Kd_new)
elif user_input == 10:
print("Exiting Program.")
break
else:
print("Error, unknown input.")
|
the-stack_106_16548
|
from bolsonaro.data.dataset_parameters import DatasetParameters
from bolsonaro.data.dataset_loader import DatasetLoader
from bolsonaro.models.model_factory import ModelFactory
from bolsonaro.models.model_parameters import ModelParameters
from bolsonaro.models.ensemble_selection_forest_regressor import EnsembleSelectionForestRegressor
from bolsonaro.trainer import Trainer
from bolsonaro.utils import resolve_experiment_id, tqdm_joblib
from bolsonaro import LOG_PATH
from bolsonaro.error_handling.logger_factory import LoggerFactory
from dotenv import find_dotenv, load_dotenv
import argparse
import copy
import json
import pathlib
import random
import os
from joblib import Parallel, delayed
import threading
import json
from tqdm import tqdm
import numpy as np
import shutil
def seed_job(seed_job_pb, seed, parameters, experiment_id, hyperparameters, verbose):
"""
Experiment function.
Will be used as base function for worker in multithreaded application.
:param seed:
:param parameters:
:param experiment_id:
:return:
"""
logger = LoggerFactory.create(LOG_PATH, 'training_seed{}_ti{}'.format(
seed, threading.get_ident()))
seed_str = str(seed)
experiment_id_str = str(experiment_id)
models_dir = parameters['models_dir'] + os.sep + experiment_id_str + os.sep + 'seeds' + \
os.sep + seed_str
pathlib.Path(models_dir).mkdir(parents=True, exist_ok=True)
dataset_parameters = DatasetParameters(
name=parameters['dataset_name'],
test_size=parameters['test_size'],
dev_size=parameters['dev_size'],
random_state=seed,
dataset_normalizer=parameters['dataset_normalizer']
)
dataset_parameters.save(models_dir, experiment_id_str)
dataset = DatasetLoader.load(dataset_parameters)
trainer = Trainer(dataset)
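    # For the 'random' strategy a single forest is fitted once per seed and later
    # deep-copied for every extracted forest size (see extracted_forest_size_job).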
if parameters['extraction_strategy'] == 'random':
pretrained_model_parameters = ModelParameters(
extracted_forest_size=parameters['forest_size'],
normalize_D=parameters['normalize_D'],
subsets_used=parameters['subsets_used'],
normalize_weights=parameters['normalize_weights'],
seed=seed,
hyperparameters=hyperparameters,
extraction_strategy=parameters['extraction_strategy']
)
pretrained_estimator = ModelFactory.build(dataset.task, pretrained_model_parameters)
pretrained_trainer = Trainer(dataset)
pretrained_trainer.init(pretrained_estimator, subsets_used=parameters['subsets_used'])
pretrained_estimator.fit(
X=pretrained_trainer._X_forest,
y=pretrained_trainer._y_forest
)
else:
pretrained_estimator = None
pretrained_model_parameters = None
if parameters['extraction_strategy'] == 'none':
forest_size = hyperparameters['n_estimators']
logger.info('Base forest training with fixed forest size of {}'.format(forest_size))
sub_models_dir = models_dir + os.sep + 'forest_size' + os.sep + str(forest_size)
# Check if the result file already exists
already_exists = False
if os.path.isdir(sub_models_dir):
sub_models_dir_files = os.listdir(sub_models_dir)
for file_name in sub_models_dir_files:
if file_name == 'model_raw_results.pickle':
already_exists = os.path.getsize(os.path.join(sub_models_dir, file_name)) > 0
break
else:
continue
if already_exists:
logger.info('Base forest result already exists. Skipping...')
else:
pathlib.Path(sub_models_dir).mkdir(parents=True, exist_ok=True)
model_parameters = ModelParameters(
extracted_forest_size=forest_size,
normalize_D=parameters['normalize_D'],
subsets_used=parameters['subsets_used'],
normalize_weights=parameters['normalize_weights'],
seed=seed,
hyperparameters=hyperparameters,
extraction_strategy=parameters['extraction_strategy']
)
model_parameters.save(sub_models_dir, experiment_id)
model = ModelFactory.build(dataset.task, model_parameters)
trainer.init(model, subsets_used=parameters['subsets_used'])
trainer.train(model)
trainer.compute_results(model, sub_models_dir)
elif parameters['extraction_strategy'] == 'omp_nn':
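        # omp_nn trains a single model with all intermediate solution sizes and then
        # derives the results for each extracted forest size from that one fit.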
forest_size = hyperparameters['n_estimators']
model_parameters = ModelParameters(
extracted_forest_size=forest_size,
normalize_D=parameters['normalize_D'],
subsets_used=parameters['subsets_used'],
normalize_weights=parameters['normalize_weights'],
seed=seed,
hyperparameters=hyperparameters,
extraction_strategy=parameters['extraction_strategy'],
intermediate_solutions_sizes=parameters['extracted_forest_size']
)
model = ModelFactory.build(dataset.task, model_parameters)
trainer.init(model, subsets_used=parameters['subsets_used'])
trainer.train(model)
for extracted_forest_size in parameters['extracted_forest_size']:
sub_models_dir = models_dir + os.sep + 'extracted_forest_sizes' + os.sep + str(extracted_forest_size)
pathlib.Path(sub_models_dir).mkdir(parents=True, exist_ok=True)
trainer.compute_results(model, sub_models_dir, extracted_forest_size=extracted_forest_size)
model_parameters.save(sub_models_dir, experiment_id)
else:
with tqdm_joblib(tqdm(total=len(parameters['extracted_forest_size']), disable=not verbose)) as extracted_forest_size_job_pb:
Parallel(n_jobs=-1)(delayed(extracted_forest_size_job)(extracted_forest_size_job_pb, parameters['extracted_forest_size'][i],
models_dir, seed, parameters, dataset, hyperparameters, experiment_id, trainer,
pretrained_estimator=pretrained_estimator, pretrained_model_parameters=pretrained_model_parameters,
use_distillation=parameters['extraction_strategy'] == 'omp_distillation')
for i in range(len(parameters['extracted_forest_size'])))
logger.info(f'Training done for seed {seed_str}')
seed_job_pb.update(1)
def extracted_forest_size_job(extracted_forest_size_job_pb, extracted_forest_size, models_dir,
seed, parameters, dataset, hyperparameters, experiment_id, trainer,
pretrained_estimator=None, pretrained_model_parameters=None, use_distillation=False):
logger = LoggerFactory.create(LOG_PATH, 'training_seed{}_extracted_forest_size{}_ti{}'.format(
seed, extracted_forest_size, threading.get_ident()))
logger.info('extracted_forest_size={}'.format(extracted_forest_size))
sub_models_dir = models_dir + os.sep + 'extracted_forest_sizes' + os.sep + str(extracted_forest_size)
# Check if the result file already exists
already_exists = False
if os.path.isdir(sub_models_dir):
sub_models_dir_files = os.listdir(sub_models_dir)
for file_name in sub_models_dir_files:
if file_name == 'model_raw_results.pickle':
already_exists = os.path.getsize(os.path.join(sub_models_dir, file_name)) > 0
break
else:
continue
if already_exists:
logger.info(f'Extracted forest {extracted_forest_size} result already exists. Skipping...')
return
pathlib.Path(sub_models_dir).mkdir(parents=True, exist_ok=True)
if not pretrained_estimator:
model_parameters = ModelParameters(
extracted_forest_size=extracted_forest_size,
normalize_D=parameters['normalize_D'],
subsets_used=parameters['subsets_used'],
normalize_weights=parameters['normalize_weights'],
seed=seed,
hyperparameters=hyperparameters,
extraction_strategy=parameters['extraction_strategy']
)
model_parameters.save(sub_models_dir, experiment_id)
model = ModelFactory.build(dataset.task, model_parameters)
else:
model = copy.deepcopy(pretrained_estimator)
pretrained_model_parameters.save(sub_models_dir, experiment_id)
trainer.init(model, subsets_used=parameters['subsets_used'])
trainer.train(model, extracted_forest_size=extracted_forest_size, seed=seed,
use_distillation=use_distillation)
trainer.compute_results(model, sub_models_dir)
"""
Command lines example for stage 1:
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=none --save_experiment_configuration 1 none_with_params --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=random --save_experiment_configuration 1 random_with_params --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 1 omp_with_params --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=none --skip_best_hyperparams --save_experiment_configuration 1 none_wo_params --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=random --skip_best_hyperparams --save_experiment_configuration 1 random_wo_params --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --skip_best_hyperparams --save_experiment_configuration 1 omp_wo_params --extracted_forest_size_stop=0.05
python code/compute_results.py --stage 1 --experiment_ids 1 2 3 4 5 6 --dataset_name=california_housing
Command lines example for stage 2:
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 2 no_normalization --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 2 normalize_D --normalize_D --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 2 normalize_weights --normalize_weights --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 2 normalize_D_and_weights --normalize_D --normalize_weights --extracted_forest_size_stop=0.05
python code/compute_results.py --stage 2 --experiment_ids 7 8 9 10 --dataset_name=california_housing
Command lines example for stage 3:
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 3 train-dev_subset --extracted_forest_size_stop=0.05 --subsets_used train,dev
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 3 train-dev_train-dev_subset --extracted_forest_size_stop=0.05 --subsets_used train+dev,train+dev
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 3 train-train-dev_subset --extracted_forest_size_stop=0.05 --subsets_used train,train+dev
python code/compute_results.py --stage 3 --experiment_ids 11 12 13 --dataset_name=california_housing
Command lines example for stage 4:
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=none --save_experiment_configuration 4 none_with_params --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=random --save_experiment_configuration 4 random_with_params --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 4 omp_with_params --extracted_forest_size_stop=0.05 --subsets_used train+dev,train+dev
python code/compute_results.py --stage 4 --experiment_ids 1 2 3 --dataset_name=california_housing
"""
if __name__ == "__main__":
load_dotenv(find_dotenv('.env'))
DEFAULT_EXPERIMENT_CONFIGURATION_PATH = 'experiments'
# the models will be stored in a directory structure like: models/{experiment_id}/seeds/{seed_nb}/extracted_forest_sizes/{extracted_forest_size}
DEFAULT_MODELS_DIR = os.environ['project_dir'] + os.sep + 'models'
DEFAULT_VERBOSE = False
DEFAULT_SKIP_BEST_HYPERPARAMS = False
DEFAULT_JOB_NUMBER = -1
DEFAULT_EXTRACTION_STRATEGY = 'omp'
DEFAULT_OVERWRITE = False
begin_random_seed_range = 1
end_random_seed_range = 2000
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--experiment_id', nargs='?', type=int, default=None, help='Specify an experiment id. Combined with --overwrite, any already existing model with this experiment id is removed.')
parser.add_argument('--experiment_configuration', nargs='?', type=str, default=None, help='Specify an experiment configuration file name. Overload all other parameters.')
parser.add_argument('--experiment_configuration_path', nargs='?', type=str, default=DEFAULT_EXPERIMENT_CONFIGURATION_PATH, help='Specify the experiment configuration directory path.')
parser.add_argument('--dataset_name', nargs='?', type=str, default=DatasetLoader.DEFAULT_DATASET_NAME, help='Specify the dataset. Regression: boston, diabetes, linnerud, california_housing. Classification: iris, digits, wine, breast_cancer, olivetti_faces, 20newsgroups, 20newsgroups_vectorized, lfw_people, lfw_pairs, covtype, rcv1, kddcup99.')
parser.add_argument('--normalize_D', action='store_true', default=DatasetLoader.DEFAULT_NORMALIZE_D, help='Specify if we want to normalize the prediction of the forest by doing the L2 division of the pred vectors.')
    parser.add_argument('--dataset_normalizer', nargs='?', type=str, default=DatasetLoader.DEFAULT_DATASET_NORMALIZER, help='Specify which dataset normalizer to use (either standard, minmax, robust or normalizer).')
parser.add_argument('--forest_size', nargs='?', type=int, default=None, help='The number of trees of the random forest.')
parser.add_argument('--extracted_forest_size_samples', nargs='?', type=int, default=DatasetLoader.DEFAULT_EXTRACTED_FOREST_SIZE_SAMPLES, help='The number of extracted forest sizes (proportional to the forest size) selected by OMP.')
parser.add_argument('--extracted_forest_size_stop', nargs='?', type=float, default=DatasetLoader.DEFAULT_EXTRACTED_FOREST_SIZE_STOP, help='Specify the upper bound of the extracted forest sizes linspace.')
parser.add_argument('--models_dir', nargs='?', type=str, default=DEFAULT_MODELS_DIR, help='The output directory of the trained models.')
parser.add_argument('--dev_size', nargs='?', type=float, default=DatasetLoader.DEFAULT_DEV_SIZE, help='Dev subset ratio.')
parser.add_argument('--test_size', nargs='?', type=float, default=DatasetLoader.DEFAULT_TEST_SIZE, help='Test subset ratio.')
parser.add_argument('--random_seed_number', nargs='?', type=int, default=DatasetLoader.DEFAULT_RANDOM_SEED_NUMBER, help='Number of random seeds used.')
    parser.add_argument('--seeds', nargs='+', type=int, default=None, help='Specify a list of seeds instead of generating them randomly.')
    parser.add_argument('--subsets_used', nargs='?', type=str, default=DatasetLoader.DEFAULT_SUBSETS_USED, help='train,dev: forest on train, OMP on dev. train+dev,train+dev: both forest and OMP on train+dev. train,train+dev: forest on train, OMP on train+dev.')
parser.add_argument('--normalize_weights', action='store_true', default=DatasetLoader.DEFAULT_NORMALIZE_WEIGHTS, help='Divide the predictions by the weights sum.')
parser.add_argument('--verbose', action='store_true', default=DEFAULT_VERBOSE, help='Print tqdm progress bar.')
    parser.add_argument('--skip_best_hyperparams', action='store_true', default=DEFAULT_SKIP_BEST_HYPERPARAMS, help='Do not use the best hyperparameters if they exist.')
parser.add_argument('--save_experiment_configuration', nargs='+', default=None, help='Save the experiment parameters specified in the command line in a file. Args: {{stage_num}} {{name}}')
    parser.add_argument('--job_number', nargs='?', type=int, default=DEFAULT_JOB_NUMBER, help='Specify the number of jobs used for the parallelisation across seeds.')
parser.add_argument('--extraction_strategy', nargs='?', type=str, default=DEFAULT_EXTRACTION_STRATEGY, help='Specify the strategy to apply to extract the trees from the forest. Either omp, omp_nn, random, none, similarity_similarities, similarity_predictions, kmeans, ensemble.')
parser.add_argument('--overwrite', action='store_true', default=DEFAULT_OVERWRITE, help='Overwrite the experiment id')
args = parser.parse_args()
if args.experiment_configuration:
with open(args.experiment_configuration_path + os.sep + \
args.experiment_configuration + '.json', 'r') as input_file:
parameters = json.load(input_file)
else:
parameters = args.__dict__
if parameters['extraction_strategy'] not in ['omp', 'omp_nn', 'omp_distillation', 'random', 'none', 'similarity_similarities', 'similarity_predictions', 'kmeans', 'ensemble']:
raise ValueError('Specified extraction strategy {} is not supported.'.format(parameters['extraction_strategy']))
pathlib.Path(parameters['models_dir']).mkdir(parents=True, exist_ok=True)
logger = LoggerFactory.create(LOG_PATH, os.path.basename(__file__))
hyperparameters_path = os.path.join('experiments', args.dataset_name, 'stage1', 'params.json')
if os.path.exists(hyperparameters_path):
logger.info("Hyperparameters found for this dataset at '{}'".format(hyperparameters_path))
with open(hyperparameters_path, 'r+') as file_hyperparameter:
loaded_hyperparameters = json.load(file_hyperparameter)['best_parameters']
if args.skip_best_hyperparams:
hyperparameters = {'n_estimators': loaded_hyperparameters['n_estimators']}
else:
hyperparameters = loaded_hyperparameters
else:
hyperparameters = {}
"""
    First case: no best hyperparameters are specified and no forest_size parameter
    is specified as an argument, so use the DEFAULT_FOREST_SIZE.
    Second case: whether or not hyperparameters are specified, the forest_size
    parameter will override them.
    Third implicit case: use the number of estimators found in the specified hyperparameters.
"""
if len(hyperparameters) == 0 and parameters['forest_size'] is None:
hyperparameters['n_estimators'] = DatasetLoader.DEFAULT_FOREST_SIZE
elif parameters['forest_size'] is not None:
hyperparameters['n_estimators'] = parameters['forest_size']
    # The numbers of trees to extract from the forest (K values)
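    # Illustrative example: with n_estimators=100, extracted_forest_size_stop=0.05
    # and extracted_forest_size_samples=5, the expression below yields [1, 2, 3, 4, 5].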
parameters['extracted_forest_size'] = np.unique(np.around(hyperparameters['n_estimators'] *
np.linspace(0, args.extracted_forest_size_stop,
parameters['extracted_forest_size_samples'] + 1,
                                                        endpoint=True)[1:]).astype(int)).tolist()
logger.info(f"extracted forest sizes: {parameters['extracted_forest_size']}")
    if parameters['seeds'] is not None and parameters['random_seed_number'] > 1:
logger.warning('seeds and random_seed_number parameters are both specified. Seeds will be used.')
# Seeds are either provided as parameters or generated at random
seeds = parameters['seeds'] if parameters['seeds'] is not None \
else [random.randint(begin_random_seed_range, end_random_seed_range) \
for i in range(parameters['random_seed_number'])]
if args.experiment_id:
experiment_id = args.experiment_id
if args.overwrite:
shutil.rmtree(os.path.join(parameters['models_dir'], str(experiment_id)), ignore_errors=True)
else:
# Resolve the next experiment id number (last id + 1)
experiment_id = resolve_experiment_id(parameters['models_dir'])
logger.info('Experiment id: {}'.format(experiment_id))
"""
If the experiment configuration isn't coming from
an already existing file, save it to a json file to
    keep track of it (either at a specified path, or in the 'unnamed' dir.).
"""
if args.experiment_configuration is None:
if args.save_experiment_configuration:
if len(args.save_experiment_configuration) != 2:
raise ValueError('save_experiment_configuration must have two parameters.')
elif int(args.save_experiment_configuration[0]) not in list(range(1, 6)):
raise ValueError('save_experiment_configuration first parameter must be a supported stage id (i.e. [1, 5]).')
output_experiment_stage_path = os.path.join(args.experiment_configuration_path,
args.dataset_name, 'stage' + args.save_experiment_configuration[0])
pathlib.Path(output_experiment_stage_path).mkdir(parents=True, exist_ok=True)
output_experiment_configuration_path = os.path.join(output_experiment_stage_path,
args.save_experiment_configuration[1] + '.json')
else:
pathlib.Path(os.path.join(args.experiment_configuration_path, 'unnamed')).mkdir(parents=True, exist_ok=True)
output_experiment_configuration_path = os.path.join(
args.experiment_configuration_path, 'unnamed', 'unnamed_{}.json'.format(
experiment_id))
with open(output_experiment_configuration_path, 'w') as output_file:
json.dump(
parameters,
output_file,
indent=4
)
    # Run as many jobs as there are seeds
with tqdm_joblib(tqdm(total=len(seeds), disable=not args.verbose)) as seed_job_pb:
Parallel(n_jobs=args.job_number)(delayed(seed_job)(seed_job_pb, seeds[i],
parameters, experiment_id, hyperparameters, args.verbose) for i in range(len(seeds)))
|
the-stack_106_16552
|
#
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
"""Environment plugin."""
import gettext
import os
import pwd
from otopi import plugin
from otopi import util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine_common import constants as oengcommcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""Environment plugin."""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
)
def _init(self):
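        # In developer mode the whole setup runs as the current (unprivileged) user,
        # so both the root and apache users fall back to the effective uid below.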
if self.environment[osetupcons.CoreEnv.DEVELOPER_MODE]:
rootUser = apacheUser = pwd.getpwuid(os.geteuid())[0]
else:
rootUser = oengcommcons.Defaults.DEFAULT_SYSTEM_USER_ROOT
apacheUser = oengcommcons.Defaults.DEFAULT_SYSTEM_USER_APACHE
self.environment.setdefault(
oengcommcons.SystemEnv.USER_ROOT,
rootUser
)
self.environment.setdefault(
oengcommcons.SystemEnv.USER_APACHE,
apacheUser
)
self.environment.setdefault(
oengcommcons.SystemEnv.USER_VDSM,
oengcommcons.Defaults.DEFAULT_SYSTEM_USER_VDSM
)
self.environment.setdefault(
oengcommcons.SystemEnv.GROUP_KVM,
oengcommcons.Defaults.DEFAULT_SYSTEM_GROUP_KVM
)
self.environment.setdefault(
oengcommcons.SystemEnv.USER_POSTGRES,
oengcommcons.Defaults.DEFAULT_SYSTEM_USER_POSTGRES
)
# vim: expandtab tabstop=4 shiftwidth=4
|
the-stack_106_16556
|
"""Train a DeepLab v3 plus model using tf.estimator API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
import deeplab_model
from utils import preprocessing
from tensorflow.python import debug as tf_debug
import shutil
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', type=str, default='./model',
help='Base directory for the model.')
parser.add_argument('--clean_model_dir', action='store_true',
help='Whether to clean up the model directory if present.')
parser.add_argument('--train_epochs', type=int, default=26,
help='Number of training epochs: '
'For 30K iteration with batch size 6, train_epoch = 17.01 (= 30K * 6 / 10,582). '
'For 30K iteration with batch size 8, train_epoch = 22.68 (= 30K * 8 / 10,582). '
'For 30K iteration with batch size 10, train_epoch = 25.52 (= 30K * 10 / 10,582). '
'For 30K iteration with batch size 11, train_epoch = 31.19 (= 30K * 11 / 10,582). '
'For 30K iteration with batch size 15, train_epoch = 42.53 (= 30K * 15 / 10,582). '
'For 30K iteration with batch size 16, train_epoch = 45.36 (= 30K * 16 / 10,582).')
parser.add_argument('--epochs_per_eval', type=int, default=1,
help='The number of training epochs to run between evaluations.')
parser.add_argument('--tensorboard_images_max_outputs', type=int, default=6,
help='Max number of batch elements to generate for Tensorboard.')
parser.add_argument('--batch_size', type=int, default=10,
help='Number of examples per batch.')
parser.add_argument('--learning_rate_policy', type=str, default='poly',
choices=['poly', 'piecewise'],
help='Learning rate policy to optimize loss.')
parser.add_argument('--max_iter', type=int, default=30000,
help='Number of maximum iteration used for "poly" learning rate policy.')
parser.add_argument('--data_dir', type=str, default='./dataset/',
help='Path to the directory containing the PASCAL VOC data tf record.')
parser.add_argument('--base_architecture', type=str, default='resnet_v2_101',
choices=['resnet_v2_50', 'resnet_v2_101'],
help='The architecture of base Resnet building block.')
parser.add_argument('--pre_trained_model', type=str, default='./ini_checkpoints/resnet_v2_101/resnet_v2_101.ckpt',
help='Path to the pre-trained model checkpoint.')
parser.add_argument('--output_stride', type=int, default=16,
choices=[8, 16],
help='Output stride for DeepLab v3. Currently 8 or 16 is supported.')
parser.add_argument('--freeze_batch_norm', action='store_true',
help='Freeze batch normalization parameters during the training.')
parser.add_argument('--initial_learning_rate', type=float, default=7e-3,
help='Initial learning rate for the optimizer.')
parser.add_argument('--end_learning_rate', type=float, default=1e-6,
help='End learning rate for the optimizer.')
parser.add_argument('--initial_global_step', type=int, default=0,
help='Initial global step for controlling learning rate when fine-tuning model.')
parser.add_argument('--weight_decay', type=float, default=2e-4,
help='The weight decay to use for regularizing the model.')
parser.add_argument('--debug', action='store_true',
help='Whether to use debugger to track down bad values during training.')
parser.add_argument('--psi_type', type=str, default='ZERO',
help='Options: ZERO, ONES, GAUSSIAN, SOBEL')
parser.add_argument('--psi_param', type=float, default=1.0,
help='Sigma if GAUSSIAN.')
parser.add_argument('--psi_scale', type=float, default=1.0,
help='Scale of PSI function.')
_NUM_CLASSES = 21
_HEIGHT = 513
_WIDTH = 513
_DEPTH = 3
_MIN_SCALE = 0.5
_MAX_SCALE = 2.0
_IGNORE_LABEL = 255
_POWER = 0.9
_MOMENTUM = 0.9
_BATCH_NORM_DECAY = 0.9997
_NUM_IMAGES = {
'train': 10582,
'validation': 1449,
}
def get_filenames(is_training, data_dir):
"""Return a list of filenames.
Args:
is_training: A boolean denoting whether the input is for training.
    data_dir: path to the directory containing the input data.
Returns:
A list of file names.
"""
if is_training:
return [os.path.join(data_dir, 'voc_train.record')]
else:
return [os.path.join(data_dir, 'voc_val.record')]
def parse_record(raw_record):
"""Parse PASCAL image and label from a tf record."""
keys_to_features = {
'image/height':
tf.FixedLenFeature((), tf.int64),
'image/width':
tf.FixedLenFeature((), tf.int64),
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'label/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'label/format':
tf.FixedLenFeature((), tf.string, default_value='png'),
}
parsed = tf.parse_single_example(raw_record, keys_to_features)
# height = tf.cast(parsed['image/height'], tf.int32)
# width = tf.cast(parsed['image/width'], tf.int32)
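  # decode_image yields uint8 pixels; convert_image_dtype with dtype=tf.uint8 is
  # effectively a no-op here, and to_float casts to float32 while keeping the
  # 0-255 range (mean subtraction happens later in preprocess_image).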
image = tf.image.decode_image(
tf.reshape(parsed['image/encoded'], shape=[]), _DEPTH)
image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))
image.set_shape([None, None, 3])
label = tf.image.decode_image(
tf.reshape(parsed['label/encoded'], shape=[]), 1)
label = tf.to_int32(tf.image.convert_image_dtype(label, dtype=tf.uint8))
label.set_shape([None, None, 1])
return image, label
def preprocess_image(image, label, is_training):
"""Preprocess a single image of layout [height, width, depth]."""
if is_training:
# Randomly scale the image and label.
image, label = preprocessing.random_rescale_image_and_label(
image, label, _MIN_SCALE, _MAX_SCALE)
# Randomly crop or pad a [_HEIGHT, _WIDTH] section of the image and label.
image, label = preprocessing.random_crop_or_pad_image_and_label(
image, label, _HEIGHT, _WIDTH, _IGNORE_LABEL)
# Randomly flip the image and label horizontally.
image, label = preprocessing.random_flip_left_right_image_and_label(
image, label)
image.set_shape([_HEIGHT, _WIDTH, 3])
label.set_shape([_HEIGHT, _WIDTH, 1])
image = preprocessing.mean_image_subtraction(image)
return image, label
def input_fn(is_training, data_dir, batch_size, num_epochs=1):
"""Input_fn using the tf.data input pipeline for CIFAR-10 dataset.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
num_epochs: The number of epochs to repeat the dataset.
Returns:
A tuple of images and labels.
"""
dataset = tf.data.Dataset.from_tensor_slices(get_filenames(is_training, data_dir))
dataset = dataset.flat_map(tf.data.TFRecordDataset)
if is_training:
    # When choosing shuffle buffer sizes, larger sizes result in better
    # randomness, while smaller sizes have better performance. Because this
    # is a relatively small dataset, we choose to shuffle the full epoch.
dataset = dataset.shuffle(buffer_size=_NUM_IMAGES['train'])
dataset = dataset.map(parse_record)
dataset = dataset.map(
lambda image, label: preprocess_image(image, label, is_training))
dataset = dataset.prefetch(batch_size)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
images, labels = iterator.get_next()
return images, labels
def main(unused_argv):
# Using the Winograd non-fused algorithms provides a small performance boost.
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
if FLAGS.clean_model_dir:
shutil.rmtree(FLAGS.model_dir, ignore_errors=True)
# Set up a RunConfig to only save checkpoints once per training cycle.
run_config = tf.estimator.RunConfig().replace(save_checkpoints_secs=1e9)
model = tf.estimator.Estimator(
model_fn=deeplab_model.deeplabv3_plus_model_fn,
model_dir=FLAGS.model_dir,
config=run_config,
params={
'output_stride': FLAGS.output_stride,
'batch_size': FLAGS.batch_size,
'base_architecture': FLAGS.base_architecture,
'pre_trained_model': FLAGS.pre_trained_model,
'batch_norm_decay': _BATCH_NORM_DECAY,
'num_classes': _NUM_CLASSES,
'tensorboard_images_max_outputs': FLAGS.tensorboard_images_max_outputs,
'weight_decay': FLAGS.weight_decay,
'learning_rate_policy': FLAGS.learning_rate_policy,
'num_train': _NUM_IMAGES['train'],
'initial_learning_rate': FLAGS.initial_learning_rate,
'max_iter': FLAGS.max_iter,
'end_learning_rate': FLAGS.end_learning_rate,
'power': _POWER,
'momentum': _MOMENTUM,
'freeze_batch_norm': FLAGS.freeze_batch_norm,
'initial_global_step': FLAGS.initial_global_step,
'psi_type': FLAGS.psi_type,
'psi_param': FLAGS.psi_param,
'psi_scale': FLAGS.psi_scale
})
for _ in range(FLAGS.train_epochs // FLAGS.epochs_per_eval):
tensors_to_log = {
'learning_rate': 'learning_rate',
'cross_entropy': 'cross_entropy',
'train_px_accuracy': 'train_px_accuracy',
'train_mean_iou': 'train_mean_iou',
}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=10)
train_hooks = [logging_hook]
eval_hooks = None
if FLAGS.debug:
debug_hook = tf_debug.LocalCLIDebugHook()
train_hooks.append(debug_hook)
eval_hooks = [debug_hook]
tf.logging.info("Start training.")
model.train(
input_fn=lambda: input_fn(True, FLAGS.data_dir, FLAGS.batch_size, FLAGS.epochs_per_eval),
hooks=train_hooks,
# steps=1 # For debug
)
tf.logging.info("Start evaluation.")
# Evaluate the model and print results
eval_results = model.evaluate(
# Batch size must be 1 for testing because the images' size differs
input_fn=lambda: input_fn(False, FLAGS.data_dir, 1),
hooks=eval_hooks,
# steps=1 # For debug
)
print(eval_results)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
the-stack_106_16557
|
# coding: utf-8
import warnings
import textwrap
from ruamel.yaml.compat import _F
if False: # MYPY
from typing import Any, Dict, Optional, List, Text # NOQA
__all__ = [
'FileMark',
'StringMark',
'CommentMark',
'YAMLError',
'MarkedYAMLError',
'ReusedAnchorWarning',
'UnsafeLoaderWarning',
'MarkedYAMLWarning',
'MarkedYAMLFutureWarning',
]
class StreamMark(object):
__slots__ = 'name', 'index', 'line', 'column'
def __init__(self, name, index, line, column):
# type: (Any, int, int, int) -> None
self.name = name
self.index = index
self.line = line
self.column = column
def __str__(self):
# type: () -> Any
where = _F(
' in "{sname!s}", line {sline1:d}, column {scolumn1:d}',
sname=self.name,
sline1=self.line + 1,
scolumn1=self.column + 1,
)
return where
def __eq__(self, other):
# type: (Any) -> bool
if self.line != other.line or self.column != other.column:
return False
if self.name != other.name or self.index != other.index:
return False
return True
def __ne__(self, other):
# type: (Any) -> bool
return not self.__eq__(other)
class FileMark(StreamMark):
__slots__ = ()
class StringMark(StreamMark):
__slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer'
def __init__(self, name, index, line, column, buffer, pointer):
# type: (Any, int, int, int, Any, Any) -> None
StreamMark.__init__(self, name, index, line, column)
self.buffer = buffer
self.pointer = pointer
def get_snippet(self, indent=4, max_length=75):
# type: (int, int) -> Any
if self.buffer is None: # always False
return None
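        # Build a one-line excerpt around self.pointer: scan left and right to the
        # nearest line break, eliding either side with ' ... ' once it would exceed
        # roughly max_length/2 characters, then append a caret line marking the
        # column, e.g. (illustrative):
        #     key: [unclosed, flow
        #          ^ (line: 3)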
head = ""
start = self.pointer
while start > 0 and self.buffer[start - 1] not in '\0\r\n\x85\u2028\u2029':
start -= 1
if self.pointer - start > max_length / 2 - 1:
head = ' ... '
start += 5
break
tail = ""
end = self.pointer
while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
end += 1
if end - self.pointer > max_length / 2 - 1:
tail = ' ... '
end -= 5
break
snippet = self.buffer[start:end]
        caret = '^ (line: {})'.format(self.line + 1)
return (
' ' * indent
+ head
+ snippet
+ tail
+ '\n'
+ ' ' * (indent + self.pointer - start + len(head))
+ caret
)
def __str__(self):
# type: () -> Any
snippet = self.get_snippet()
where = _F(
' in "{sname!s}", line {sline1:d}, column {scolumn1:d}',
sname=self.name,
sline1=self.line + 1,
scolumn1=self.column + 1,
)
if snippet is not None:
where += ':\n' + snippet
return where
def __repr__(self):
# type: () -> Any
snippet = self.get_snippet()
where = _F(
' in "{sname!s}", line {sline1:d}, column {scolumn1:d}',
sname=self.name,
sline1=self.line + 1,
scolumn1=self.column + 1,
)
if snippet is not None:
where += ':\n' + snippet
return where
class CommentMark(object):
__slots__ = ('column',)
def __init__(self, column):
# type: (Any) -> None
self.column = column
class YAMLError(Exception):
pass
class MarkedYAMLError(YAMLError):
def __init__(
self,
context=None,
context_mark=None,
problem=None,
problem_mark=None,
note=None,
warn=None,
):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
# warn is ignored
def __str__(self):
# type: () -> Any
lines = [] # type: List[str]
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None and (
self.problem is None
or self.problem_mark is None
or self.context_mark.name != self.problem_mark.name
or self.context_mark.line != self.problem_mark.line
or self.context_mark.column != self.problem_mark.column
):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None and self.note:
note = textwrap.dedent(self.note)
lines.append(note)
return '\n'.join(lines)
class YAMLStreamError(Exception):
pass
class YAMLWarning(Warning):
pass
class MarkedYAMLWarning(YAMLWarning):
def __init__(
self,
context=None,
context_mark=None,
problem=None,
problem_mark=None,
note=None,
warn=None,
):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
self.warn = warn
def __str__(self):
# type: () -> Any
lines = [] # type: List[str]
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None and (
self.problem is None
or self.problem_mark is None
or self.context_mark.name != self.problem_mark.name
or self.context_mark.line != self.problem_mark.line
or self.context_mark.column != self.problem_mark.column
):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None and self.note:
note = textwrap.dedent(self.note)
lines.append(note)
if self.warn is not None and self.warn:
warn = textwrap.dedent(self.warn)
lines.append(warn)
return '\n'.join(lines)
class ReusedAnchorWarning(YAMLWarning):
pass
class UnsafeLoaderWarning(YAMLWarning):
text = """
The default 'Loader' for 'load(stream)' without further arguments can be unsafe.
Use 'load(stream, Loader=ruamel.yaml.Loader)' explicitly if that is OK.
Alternatively include the following in your code:
import warnings
warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning)
In most other cases you should consider using 'safe_load(stream)'"""
pass
warnings.simplefilter('once', UnsafeLoaderWarning)
class MantissaNoDotYAML1_1Warning(YAMLWarning):
def __init__(self, node, flt_str):
# type: (Any, Any) -> None
self.node = node
self.flt = flt_str
def __str__(self):
# type: () -> Any
line = self.node.start_mark.line
col = self.node.start_mark.column
return """
In YAML 1.1 floating point values should have a dot ('.') in their mantissa.
See the Floating-Point Language-Independent Type for YAML™ Version 1.1 specification
( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2
Correct your float: "{}" on line: {}, column: {}
or alternatively include the following in your code:
import warnings
warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
""".format(
self.flt, line, col
)
warnings.simplefilter('once', MantissaNoDotYAML1_1Warning)
class YAMLFutureWarning(Warning):
pass
class MarkedYAMLFutureWarning(YAMLFutureWarning):
def __init__(
self,
context=None,
context_mark=None,
problem=None,
problem_mark=None,
note=None,
warn=None,
):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
self.warn = warn
def __str__(self):
# type: () -> Any
lines = [] # type: List[str]
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None and (
self.problem is None
or self.problem_mark is None
or self.context_mark.name != self.problem_mark.name
or self.context_mark.line != self.problem_mark.line
or self.context_mark.column != self.problem_mark.column
):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None and self.note:
note = textwrap.dedent(self.note)
lines.append(note)
if self.warn is not None and self.warn:
warn = textwrap.dedent(self.warn)
lines.append(warn)
return '\n'.join(lines)
|
the-stack_106_16558
|
# -*- coding: utf-8 -*-
"""
Created on 03/09/2020
Author : Carlos Eduardo Barbosa
"""
from __future__ import print_function, division
import os
import itertools
import warnings
import numpy as np
from astropy.io import fits
from astropy.table import Table
import astropy.units as u
import astropy.constants as const
from astropy.wcs import WCS
from astropy.utils.exceptions import AstropyWarning
from tqdm import tqdm
import context
def make_cubes(indir, outdir, redo=False, bands=None, bscale=1e-19):
""" Get results from cutouts and join them in a cube. """
filenames = os.listdir(indir)
galaxy = os.path.split(indir)[1]
fields = set([_.split("_")[-4] for _ in filenames])
sizes = set([_.split("_")[-2] for _ in filenames])
bands = context.bands if bands is None else bands
wave = np.array([context.wave_eff[band] for band in bands]) * u.Angstrom
flam_unit = u.erg / u.cm / u.cm / u.s / u.AA
fnu_unit = u.erg / u.s / u.cm / u.cm / u.Hz
ext = {"swp": "DATA", "swpweight": "WEIGHTS"}
hfields = ["GAIN", "PSFFWHM", "DATE-OBS"]
for field, size in itertools.product(fields, sizes):
cubename = os.path.join(outdir, "{}_{}_{}.fits".format(galaxy, field,
size))
if os.path.exists(cubename) and not redo:
continue
# Loading and checking images
imgs = [os.path.join(indir, "{}_{}_{}_{}_swp.fits".format(galaxy,
field, band, size)) for band in bands]
if not all([os.path.exists(_) for _ in imgs]):
continue
# Checking if images have calibration available
headers = [fits.getheader(img, ext=1) for img in imgs]
if not all(["MAGZP" in h for h in headers]):
continue
# Checking if weight images are available
wimgs = [os.path.join(indir, "{}_{}_{}_{}_swpweight.fits".format(
galaxy, field, band, size)) for band in bands]
has_errs = all([os.path.exists(_) for _ in wimgs])
# Making new header with WCS
h = headers[0].copy()
del h["FILTER"]
del h["MAGZP"]
w = WCS(h)
nw = WCS(naxis=3)
nw.wcs.cdelt[:2] = w.wcs.cdelt
nw.wcs.crval[:2] = w.wcs.crval
nw.wcs.crpix[:2] = w.wcs.crpix
nw.wcs.ctype[0] = w.wcs.ctype[0]
nw.wcs.ctype[1] = w.wcs.ctype[1]
try:
nw.wcs.pc[:2, :2] = w.wcs.pc
except:
pass
h.update(nw.to_header())
        # Performing calibration
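        # MAGZP is treated as an AB magnitude zero point: f0 = 10**(-0.4*(48.6 + m0))
        # converts counts to F_nu (erg/s/cm2/Hz), which is then turned into
        # F_lambda through F_lambda = F_nu * c / lambda**2.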
m0 = np.array([h["MAGZP"] for h in headers])
gain = np.array([h["GAIN"] for h in headers])
f0 = np.power(10, -0.4 * (48.6 + m0))
data = np.array([fits.getdata(img, 1) for img in imgs])
fnu = data * f0[:, None, None] * fnu_unit
flam = fnu * const.c / wave[:, None, None]**2
flam = flam.to(flam_unit).value / bscale
if has_errs:
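            # Assumed error model: the swpweight map stores the inverse variance of the
            # background, and a Poisson term data/gain is added for the source counts.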
weights = np.array([fits.getdata(img, 1) for img in wimgs])
dataerr = 1 / weights + np.clip(data, 0, np.infty) / gain[:, None, None]
fnuerr= dataerr * f0[:, None, None] * fnu_unit
flamerr = fnuerr * const.c / wave[:, None, None] ** 2
flamerr = flamerr.to(flam_unit).value / bscale
# Making table with metadata
tab = []
tab.append(bands)
tab.append([context.wave_eff[band] for band in bands])
tab.append([context.exptimes[band] for band in bands])
names = ["FILTER", "WAVE_EFF", "EXPTIME"]
for f in hfields:
if not all([f in h for h in headers]):
continue
tab.append([h[f] for h in headers])
names.append(f)
tab = Table(tab, names=names)
# Producing data cubes HDUs.
hdus = [fits.PrimaryHDU()]
hdu1 = fits.ImageHDU(flam, h)
hdu1.header["EXTNAME"] = ("DATA", "Name of the extension")
hdus.append(hdu1)
if has_errs:
hdu2 = fits.ImageHDU(flamerr, h)
hdu2.header["EXTNAME"] = ("ERRORS", "Name of the extension")
hdus.append(hdu2)
for hdu in hdus:
hdu.header["BSCALE"] = (bscale, "Linear factor in scaling equation")
hdu.header["BZERO"] = (0, "Zero point in scaling equation")
hdu.header["BUNIT"] = ("{}".format(flam_unit),
"Physical units of the array values")
thdu = fits.BinTableHDU(tab)
hdus.append(thdu)
thdu.header["EXTNAME"] = "METADATA"
hdulist = fits.HDUList(hdus)
hdulist.writeto(cubename, overwrite=True)
if __name__ == "__main__":
warnings.simplefilter('ignore', category=AstropyWarning)
np.seterr(divide='ignore', invalid='ignore')
surveys = []
# surveys += ["patricia", "11HUGS"]
# surveys += ["smudges2", "FDS_dwarfs"]
surveys += ["FCC", "jellyfish", "FDS_UDGs"]
surveys += ["interacting_galaxies"]
surveys += ["sample_gc_galaxies"]
# surveys += ["FDS_LSB"]
surveys = ["FCC"]
for survey in surveys:
data_dir = "/home/kadu/Dropbox/splus-halpha/data" if survey == "FCC" \
else context.data_dir
cutouts_dir = os.path.join(data_dir, survey, "cutouts")
cubes_dir = os.path.join(data_dir, survey, "scubes")
if survey == "FCC":
cutouts_dir
if not os.path.exists(cubes_dir):
os.mkdir(cubes_dir)
galaxies = sorted(os.listdir(cutouts_dir))
desc = "Producing data cubes for {}".format(survey)
for galaxy in tqdm(galaxies, desc=desc):
wdir = os.path.join(cutouts_dir, galaxy)
make_cubes(wdir, cubes_dir, redo=True)
|
the-stack_106_16559
|
import os
import time
import numpy as np
import os.path as osp
from baselines import logger
from collections import deque
from baselines.common import explained_variance, set_global_seeds
from baselines.common.policies import build_policy
try:
from mpi4py import MPI
except ImportError:
MPI = None
from baselines.ppo2.runner import Runner
def constfn(val):
def f(_):
return val
return f
def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None, model_fn=None, update_fn=None, init_fn=None, mpi_rank_weight=1, comm=None, percent=0.5, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of timesteps between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
should be smaller or equal than number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of timesteps between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0)
# Instantiate the model object (that creates act_model and train_model)
if model_fn is None:
from baselines.ppo2.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
if eval_env is not None:
eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
epinfobuf = deque(maxlen=100)
if eval_env is not None:
eval_epinfobuf = deque(maxlen=100)
if init_fn is not None:
init_fn()
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
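        # Custom modification: occlusion (forwarded to runner.run below) is presumably
        # meant to be enabled only during the second half of training.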
occlude = update > (nupdates + 1) / 2
# Start timer
tstart = time.perf_counter()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...')
# Get minibatch
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run(occlude=occlude) #pylint: disable=E0632
if eval_env is not None:
eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
if update % log_interval == 0 and is_mpi_root: logger.info('Done.')
epinfobuf.extend(epinfos)
if eval_env is not None:
eval_epinfobuf.extend(eval_epinfos)
        # Here, for each minibatch, we calculate the loss and append it.
mblossvals = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
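            # Minibatches are sliced per environment here so that the recurrent states
            # stay aligned with whole trajectories.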
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
# End timer
tnow = time.perf_counter()
# Calculate the fps (frame per second)
fps = int(nbatch / (tnow - tstart))
if update_fn is not None:
update_fn(update)
if update % log_interval == 0 or update == 1:
            # Calculates if the value function is a good predictor of the returns (ev close to 1)
            # or if it's just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, returns)
logger.logkv("misc/serial_timesteps", update*nsteps)
logger.logkv("misc/nupdates", update)
logger.logkv("misc/total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("misc/explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
if eval_env is not None:
logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
logger.logkv('misc/time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv('loss/' + lossname, lossval)
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and is_mpi_root:
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
return model
# Avoid a division error when calculating the mean (if the list is empty, return np.nan instead of raising an error)
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
|
the-stack_106_16562
|
# -*- coding: utf-8 -*-
'''
Primary interfaces for the salt-cloud system
'''
# Need to get data from 4 sources!
# CLI options
# salt cloud config - CONFIG_DIR + '/cloud'
# salt master config (for master integration)
# salt VM config, where VMs are defined - CONFIG_DIR + '/cloud.profiles'
#
# The cli, master and cloud configs will merge for opts
# the VM data will be in opts['profiles']
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import logging
from salt.ext.six.moves import input
# Import salt libs
import salt.config
import salt.defaults.exitcodes
import salt.output
import salt.utils
from salt.utils import parsers
from salt.utils.verify import check_user, verify_env, verify_files, verify_log
# Import salt.cloud libs
import salt.cloud
import salt.utils.cloud
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
import salt.ext.six as six
import salt.syspaths as syspaths
log = logging.getLogger(__name__)
class SaltCloud(parsers.SaltCloudParser):
def run(self):
'''
Execute the salt-cloud command line
'''
# Parse shell arguments
self.parse_args()
salt_master_user = self.config.get('user')
if salt_master_user is None:
salt_master_user = salt.utils.get_user()
if not check_user(salt_master_user):
self.error(
'If salt-cloud is running on a master machine, salt-cloud '
'needs to run as the same user as the salt-master, \'{0}\'. '
'If salt-cloud is not running on a salt-master, the '
'appropriate write permissions must be granted to \'{1}\'. '
'Please run salt-cloud as root, \'{0}\', or change '
'permissions for \'{1}\'.'.format(
salt_master_user,
syspaths.CONFIG_DIR
)
)
try:
if self.config['verify_env']:
verify_env(
[os.path.dirname(self.config['conf_file'])],
salt_master_user,
root_dir=self.config['root_dir'],
)
logfile = self.config['log_file']
if logfile is not None and not logfile.startswith('tcp://') \
and not logfile.startswith('udp://') \
and not logfile.startswith('file://'):
# Logfile is not using Syslog, verify
verify_files([logfile], salt_master_user)
except (IOError, OSError) as err:
log.error('Error while verifying the environment: {0}'.format(err))
sys.exit(err.errno)
# Setup log file logging
self.setup_logfile_logger()
verify_log(self.config)
if self.options.update_bootstrap:
ret = salt.utils.cloud.update_bootstrap(self.config)
salt.output.display_output(ret,
self.options.output,
opts=self.config)
self.exit(salt.defaults.exitcodes.EX_OK)
log.info('salt-cloud starting')
try:
mapper = salt.cloud.Map(self.config)
except SaltCloudSystemExit as exc:
self.handle_exception(exc.args, exc)
except SaltCloudException as exc:
msg = 'There was an error generating the mapper.'
self.handle_exception(msg, exc)
names = self.config.get('names', None)
if names is not None:
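            # Restrict the rendered map to the instance names given on the command line.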
filtered_rendered_map = {}
for map_profile in mapper.rendered_map:
filtered_map_profile = {}
for name in mapper.rendered_map[map_profile]:
if name in names:
filtered_map_profile[name] = mapper.rendered_map[map_profile][name]
if filtered_map_profile:
filtered_rendered_map[map_profile] = filtered_map_profile
mapper.rendered_map = filtered_rendered_map
ret = {}
if self.selected_query_option is not None:
if self.selected_query_option == 'list_providers':
try:
ret = mapper.provider_list()
except (SaltCloudException, Exception) as exc:
msg = 'There was an error listing providers: {0}'
self.handle_exception(msg, exc)
elif self.selected_query_option == 'list_profiles':
provider = self.options.list_profiles
try:
ret = mapper.profile_list(provider)
except(SaltCloudException, Exception) as exc:
msg = 'There was an error listing profiles: {0}'
self.handle_exception(msg, exc)
elif self.config.get('map', None):
log.info(
'Applying map from \'{0}\'.'.format(self.config['map'])
)
try:
ret = mapper.interpolated_map(
query=self.selected_query_option
)
except (SaltCloudException, Exception) as exc:
msg = 'There was an error with a custom map: {0}'
self.handle_exception(msg, exc)
else:
try:
ret = mapper.map_providers_parallel(
query=self.selected_query_option
)
except (SaltCloudException, Exception) as exc:
msg = 'There was an error with a map: {0}'
self.handle_exception(msg, exc)
elif self.options.list_locations is not None:
try:
ret = mapper.location_list(
self.options.list_locations
)
except (SaltCloudException, Exception) as exc:
msg = 'There was an error listing locations: {0}'
self.handle_exception(msg, exc)
elif self.options.list_images is not None:
try:
ret = mapper.image_list(
self.options.list_images
)
except (SaltCloudException, Exception) as exc:
msg = 'There was an error listing images: {0}'
self.handle_exception(msg, exc)
elif self.options.list_sizes is not None:
try:
ret = mapper.size_list(
self.options.list_sizes
)
except (SaltCloudException, Exception) as exc:
msg = 'There was an error listing sizes: {0}'
self.handle_exception(msg, exc)
elif self.options.destroy and (self.config.get('names', None) or
self.config.get('map', None)):
map_file = self.config.get('map', None)
names = self.config.get('names', ())
if map_file is not None:
if names != ():
msg = 'Supplying a mapfile, \'{0}\', in addition to instance names {1} ' \
'with the \'--destroy\' or \'-d\' function is not supported. ' \
'Please choose to delete either the entire map file or individual ' \
'instances.'.format(map_file, names)
self.handle_exception(msg, SaltCloudSystemExit)
log.info('Applying map from \'{0}\'.'.format(map_file))
matching = mapper.delete_map(query='list_nodes')
else:
matching = mapper.get_running_by_names(
names,
profile=self.options.profile
)
if not matching:
print('No machines were found to be destroyed')
self.exit(salt.defaults.exitcodes.EX_OK)
msg = 'The following virtual machines are set to be destroyed:\n'
names = set()
for alias, drivers in six.iteritems(matching):
msg += ' {0}:\n'.format(alias)
for driver, vms in six.iteritems(drivers):
msg += ' {0}:\n'.format(driver)
for name in vms:
msg += ' {0}\n'.format(name)
names.add(name)
try:
if self.print_confirm(msg):
ret = mapper.destroy(names, cached=True)
except (SaltCloudException, Exception) as exc:
msg = 'There was an error destroying machines: {0}'
self.handle_exception(msg, exc)
elif self.options.action and (self.config.get('names', None) or
self.config.get('map', None)):
if self.config.get('map', None):
log.info(
'Applying map from \'{0}\'.'.format(self.config['map'])
)
try:
names = mapper.get_vmnames_by_action(self.options.action)
except SaltCloudException as exc:
msg = 'There was an error actioning virtual machines.'
self.handle_exception(msg, exc)
else:
names = self.config.get('names', None)
kwargs = {}
machines = []
msg = (
'The following virtual machines are set to be actioned with '
'"{0}":\n'.format(
self.options.action
)
)
for name in names:
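                # Arguments of the form key=value are treated as kwargs for the action;
                # everything else is taken as a machine name.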
if '=' in name:
# This is obviously not a machine name, treat it as a kwarg
key, value = name.split('=', 1)
kwargs[key] = value
else:
msg += ' {0}\n'.format(name)
machines.append(name)
names = machines
try:
if self.print_confirm(msg):
ret = mapper.do_action(names, kwargs)
except (SaltCloudException, Exception) as exc:
msg = 'There was an error actioning machines: {0}'
self.handle_exception(msg, exc)
elif self.options.function:
kwargs = {}
args = self.args[:]
for arg in args[:]:
if '=' in arg:
key, value = arg.split('=', 1)
kwargs[key] = value
args.remove(arg)
if args:
self.error(
'Any arguments passed to --function need to be passed '
'as kwargs. Ex: image=ami-54cf5c3d. Remaining '
'arguments: {0}'.format(args)
)
try:
ret = mapper.do_function(
self.function_provider, self.function_name, kwargs
)
except (SaltCloudException, Exception) as exc:
msg = 'There was an error running the function: {0}'
self.handle_exception(msg, exc)
elif self.options.profile and self.config.get('names', False):
try:
ret = mapper.run_profile(
self.options.profile,
self.config.get('names')
)
except (SaltCloudException, Exception) as exc:
msg = 'There was a profile error: {0}'
self.handle_exception(msg, exc)
elif self.options.set_password:
username = self.credential_username
provider_name = "salt.cloud.provider.{0}".format(self.credential_provider)
# TODO: check if provider is configured
# set the password
salt.utils.cloud.store_password_in_keyring(provider_name, username)
elif self.config.get('map', None) and \
self.selected_query_option is None:
if len(mapper.rendered_map) == 0:
sys.stderr.write('No nodes defined in this map')
self.exit(salt.defaults.exitcodes.EX_GENERIC)
try:
ret = {}
run_map = True
log.info(
'Applying map from \'{0}\'.'.format(self.config['map'])
)
dmap = mapper.map_data()
msg = ''
if 'errors' in dmap:
# display profile errors
msg += 'Found the following errors:\n'
for profile_name, error in six.iteritems(dmap['errors']):
msg += ' {0}: {1}\n'.format(profile_name, error)
sys.stderr.write(msg)
sys.stderr.flush()
msg = ''
if 'existing' in dmap:
msg += ('The following virtual machines already exist:\n')
for name in dmap['existing']:
msg += ' {0}\n'.format(name)
if dmap['create']:
msg += ('The following virtual machines are set to be '
'created:\n')
for name in dmap['create']:
msg += ' {0}\n'.format(name)
if 'destroy' in dmap:
msg += ('The following virtual machines are set to be '
'destroyed:\n')
for name in dmap['destroy']:
msg += ' {0}\n'.format(name)
if not dmap['create'] and not dmap.get('destroy', None):
if not dmap.get('existing', None):
# nothing to create or destroy & nothing exists
print(msg)
self.exit(1)
else:
# nothing to create or destroy, print existing
run_map = False
if run_map:
if self.print_confirm(msg):
ret = mapper.run_map(dmap)
if self.config.get('parallel', False) is False:
log.info('Complete')
if dmap.get('existing', None):
for name in dmap['existing']:
if 'ec2' in dmap['existing'][name]['provider']:
msg = 'Instance already exists, or is terminated and has the same name.'
else:
msg = 'Already running.'
ret[name] = {'Message': msg}
except (SaltCloudException, Exception) as exc:
msg = 'There was a query error: {0}'
self.handle_exception(msg, exc)
elif self.options.bootstrap:
host = self.options.bootstrap
if self.args and '=' not in self.args[0]:
minion_id = self.args.pop(0)
else:
minion_id = host
vm_ = {
'driver': '',
'ssh_host': host,
'name': minion_id,
}
args = self.args[:]
for arg in args[:]:
if '=' in arg:
key, value = arg.split('=', 1)
vm_[key] = value
args.remove(arg)
if args:
self.error(
'Any arguments passed to --bootstrap need to be passed as '
'kwargs. Ex: ssh_username=larry. Remaining arguments: {0}'.format(args)
)
try:
ret = salt.utils.cloud.bootstrap(vm_, self.config)
except (SaltCloudException, Exception) as exc:
msg = 'There was an error bootstrapping the minion: {0}'
self.handle_exception(msg, exc)
else:
            self.error('Nothing was done. Are you using the proper arguments?')
salt.output.display_output(ret,
self.options.output,
opts=self.config)
self.exit(salt.defaults.exitcodes.EX_OK)
def print_confirm(self, msg):
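        '''
        Display ``msg`` and prompt the user before proceeding. Returns True
        without prompting when the assume-yes option was passed; otherwise
        returns True only if the answer starts with 'y'.
        '''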
if self.options.assume_yes:
return True
print(msg)
res = input('Proceed? [N/y] ')
if not res.lower().startswith('y'):
return False
print('... proceeding')
return True
def handle_exception(self, msg, exc):
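        '''
        Report ``exc`` using ``msg`` as a format string. SaltCloudSystemExit
        exceptions exit with their own exit code, other SaltCloudExceptions
        are passed to self.error(), and any remaining exception is logged
        (with a traceback at debug level) before exiting EX_GENERIC.
        '''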
if isinstance(exc, SaltCloudException):
# It's a known exception and we know how to handle it
if isinstance(exc, SaltCloudSystemExit):
# This is a salt cloud system exit
if exc.exit_code > 0:
# the exit code is bigger than 0, it's an error
msg = 'Error: {0}'.format(msg)
self.exit(
exc.exit_code,
'{0}\n'.format(
msg.format(str(exc).rstrip())
)
)
# It's not a system exit but it's an error we can
# handle
self.error(
msg.format(str(exc))
)
# This is a generic exception, log it, include traceback if
# debug logging is enabled and exit.
log.error(
msg.format(exc),
# Show the traceback if the debug logging level is
# enabled
exc_info_on_loglevel=logging.DEBUG
)
self.exit(salt.defaults.exitcodes.EX_GENERIC)
|
the-stack_106_16564
|
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# Import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test.support.import_module('threading')
from test.script_helper import assert_python_ok
import sys
import threading
import time
import unittest
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.process import BrokenProcessPool
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class ExecutorMixin:
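    """Shared fixture for the executor tests.

    setUp() creates an executor of ``executor_type`` with ``worker_count``
    workers (skipping the test if the platform cannot provide one) and primes
    it with warm-up tasks; tearDown() shuts it down and fails the test if it
    took suspiciously long.
    """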
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError as e:
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
t = {executor_type}(5)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
queue_management_thread.join()
for p in processes.values():
p.join()
class WaitTests:
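    """Tests for futures.wait(), mixing the module-level futures in known
    states (cancelled, failed, successful) with futures freshly submitted to
    the executor under test.
    """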
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, unittest.TestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase):
pass
class AsCompletedTests:
# TODO([email protected]): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 2)
        completed = [f for f in futures.as_completed([future1, future1])]
self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase):
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
        # TODO([email protected]): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
        # TODO([email protected]): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(__name__)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
the-stack_106_16565
|
import os
import multiprocessing as mp
from typing import Text, Type, List, Set, Dict, Tuple
from copy import deepcopy
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from appyratus.enum import EnumValueStr
from ravel.util.misc_functions import remove_keys, import_object
from ravel.constants import ID, REV
from .base import Store, StoreEvent
class CacheMode(EnumValueStr):
@staticmethod
def values():
return {
'writethru',
'writeback',
'readonly',
}
class CacheStoreExecutor(ThreadPoolExecutor):
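    """Single-threaded executor used in writeback mode.

    The initializer bootstraps and binds the backend store on the worker
    thread; each enqueued store method is wrapped in a StoreEvent and replayed
    against the backend via play(), so backend writes happen asynchronously
    with respect to the caller.
    """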
def __init__(self, store: 'CacheStore'):
super().__init__(max_workers=1, initializer=self.initializer)
self.store = store
def initializer(self):
self.store.be.bootstrap(self.store.be.app)
self.store.be.bind(self.store.be.resource_type)
def enqueue(self, method: Text, args=None, kwargs=None):
def task(store, event):
store.play([event])
event = StoreEvent(method=method, args=args, kwargs=kwargs)
return self.submit(task, store=self.store.be, event=event)
class CacheStore(Store):
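    """Store that fronts a backend store with a cache store.

    Reads are served from the front-end (``fe``) store and reconciled against
    the backend (``be``) using each record's _rev. Writes go to the front-end
    immediately and are propagated to the backend either synchronously
    (CacheMode.writethru) or via a background executor (CacheMode.writeback);
    readonly mode appears to leave the backend untouched.
    """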
prefetch = False
mode = CacheMode.writethru
fe = None
be = None
fe_params = None
be_params = None
def __init__(self):
super().__init__()
self.executor = None
@classmethod
def on_bootstrap(cls, prefetch=False, mode=None, front=None, back=None):
from .simulation_store import SimulationStore
cls.prefetch = prefetch if prefetch is not None else cls.prefetch
cls.mode = mode or cls.mode
cls.fe = SimulationStore()
cls.fe_params = front
cls.be_params = back
def on_bind(
self,
resource_type: Type['Resource'],
prefetch=False,
mode: CacheMode = None,
front: Dict = None,
back: Dict = None,
):
if prefetch is not None:
self.prefetch = prefetch
self.mode = mode or self.mode
front = front or self.fe_params
back = back or self.be_params
self.fe = self._setup_inner_store(
resource_type,
front['store'],
front.get('params', {}),
)
self.be = self._setup_inner_store(
resource_type,
back['store'],
back.get('params', {}),
)
if self.prefetch:
self.fetch_all()
if self.mode == CacheMode.writeback:
self.executor = CacheStoreExecutor(self)
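    # A minimal usage sketch (illustrative only, assuming the base
    # Store.bind() forwards its keyword arguments to on_bind()). 'User' and
    # 'SqlalchemyStore' are hypothetical placeholders; SimulationStore is the
    # default front-end set up in on_bootstrap() above.
    #
    #   cache_store = CacheStore()
    #   cache_store.bind(
    #       User,
    #       mode=CacheMode.writeback,
    #       front={'store': 'SimulationStore'},
    #       back={'store': 'SqlalchemyStore', 'params': {}},
    #   )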
def _setup_inner_store(
self, resource_type: Type['BizType'], store_class_name: Text, bind_params: Dict = None
):
# fetch the store type from the ResourceBinder
store_type = resource_type.ravel.app.manifest.store_classes.get(
store_class_name.split('.')[-1]
)
if store_type is None:
            raise Exception(f'{store_class_name} not registered')
# create an instance of this store and bind it
store = store_type()
if not store.is_bound:
store.bind(resource_type, **(bind_params or {}))
return store
def create_id(self, record):
raise NotImplementedError()
def count(self) -> int:
return self.be.count()
def fetch(self, _id, fields: Dict = None) -> Dict:
return self.fetch_many({_id}, fields=fields).get(_id)
def fetch_all(self, fields: Set[Text] = None) -> Dict:
be_ids = {
rec[ID]
for rec in self.be.fetch_all(fields={ID}).values()
if rec is not None
}
return self.fetch_many(be_ids, fields=fields)
def fetch_many(self, _ids, fields: Dict = None) -> Dict:
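        """
        Fetch records from the front-end cache, then compare their _rev
        values against the backend: ids missing or stale in the cache are
        re-fetched from the backend, ids that no longer exist in the backend
        are evicted, and the refreshed records are written back into the
        cache before being returned.
        """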
fe_records = self.fe.fetch_many(_ids, fields=fields)
be_revs = self.be.fetch_many(fe_records.keys(), fields={REV})
ids = set(_ids) if not isinstance(_ids, set) else _ids
ids_fe = set(fe_records.keys()) # ids in FE
ids_missing = ids - ids_fe # ids not in FE
ids_to_delete = ids_fe - be_revs.keys() # ids to delete in FE
ids_to_update = set() # ids to update in FE
for _id, fe_rec in fe_records.items():
if fe_rec is None:
ids_missing.add(_id)
elif be_revs.get(_id, {}).get(REV, 0) > fe_rec.get(REV, 0):
ids_to_update.add(_id)
# records in BE ONLY
ids_to_fetch_from_be = ids_missing | ids_to_update
if ids_to_fetch_from_be:
be_records = self.be.fetch_many(ids_to_fetch_from_be)
else:
be_records = {}
# partition fe_records into separate lists for
# performing batch insert and update
records_to_update = []
records_to_create = []
for _id, be_rec in be_records.items():
if _id in ids_missing:
records_to_create.append(be_rec)
elif _id in ids_to_update:
records_to_update.append(be_rec)
# perform batch operations in FE
if ids_to_delete:
self.fe.delete_many(ids_to_delete)
if records_to_create:
self.fe.create_many(records_to_create)
if records_to_update:
self.fe.update_many(
(rec[ID] for rec in records_to_update), records_to_update
)
# merge fresh BE records to return into FE records
if be_records:
# TODO: prune the be_records to fields
if fields:
all_fields = set(self.resource_type.Schema.fields.keys())
fields_to_remove = all_fields - fields
for be_rec in remove_keys(
be_records.values(), fields_to_remove, in_place=True
):
if be_rec:
fe_records[be_rec[ID]] = be_rec
else:
fe_records.update(be_records)
return fe_records
def query(self, predicate, **kwargs):
"""
"""
fe_records = self.fe.query(predicate=predicate, **kwargs)
ids_fe = {rec[ID] for rec in fe_records}
# TODO: update predicate to fetch records with stale revs too
predicate = self.resource_type._id.excluding(ids_fe) & predicate
be_records = self.be.query(predicate=predicate, **kwargs)
# do batch FE operations
# merge BE records into FE records to return
fe_records.extend(self.fe.create_many(be_records))
return fe_records
def exists(self, _id) -> bool:
"""
Return True if the record with the given _id exists.
"""
return self.be.exists(_id)
def exists_many(self, _ids: Set) -> Dict[object, bool]:
return self.be.exists_many(_ids)
def create(self, data: Dict) -> Dict:
"""
        Create a new record. If the _id is not contained in the data dict,
        it is the responsibility of the Store class to generate one.
"""
fe_record = self.fe.create(data)
# remove _rev from a copy of fe_record so that the BE store doesn't
# increment it from what was set by the FE store.
fe_record_no_rev = fe_record.copy()
del fe_record_no_rev[REV]
if self.mode == CacheMode.writethru:
self.be.create(fe_record_no_rev)
if self.mode == CacheMode.writeback:
self.executor.enqueue('create', args=(fe_record_no_rev, ))
return fe_record
def create_many(self, records: List[Dict]) -> None:
"""
Create a new record. It is the responsibility of the Store class to
generate the _id.
"""
fe_records = self.fe.create_many(records)
fe_records_no_rev = []
for rec in fe_records.values():
rec = rec.copy()
del rec[REV]
fe_records_no_rev.append(rec)
        if self.mode == CacheMode.writethru:
be_records = self.be.create_many(fe_records_no_rev)
elif self.mode == CacheMode.writeback:
self.executor.enqueue('create_many', args=(fe_records_no_rev, ))
return fe_records
def update(self, _id, record: Dict) -> Dict:
"""
        Update a record with the data passed in.
"""
record.setdefault(ID, _id)
if not self.fe.exists(_id):
return self.create(record)
fe_record = self.fe.update(_id, record)
fe_record_no_rev = fe_record.copy()
del fe_record_no_rev[REV]
if self.mode == CacheMode.writethru:
self.be.update(_id, fe_record_no_rev)
elif self.mode == CacheMode.writeback:
self.executor.enqueue(
'update', args=(
_id,
fe_record_no_rev,
)
)
return fe_record
def update_many(self, _ids: List, data: Dict = None) -> None:
"""
Update multiple records. If a single data dict is passed in, then try to
apply the same update to all records; otherwise, if a list of data dicts
is passed in, try to zip the _ids with the data dicts and apply each
unique update or each group of identical updates individually.
"""
        fe_records = self.fe.update_many(_ids, data)
        if self.mode == CacheMode.writethru:
            self.be.update_many(_ids, data)
        elif self.mode == CacheMode.writeback:
            self.executor.enqueue(
                'update_many', args=(_ids, ), kwargs={'data': data}
            )
return fe_records
def delete(self, _id) -> None:
"""
Delete a single record.
"""
self.fe.delete(_id)
if self.mode == CacheMode.writethru:
self.be.delete(_id)
elif self.mode == CacheMode.writeback:
self.executor.enqueue('delete', args=(_id, ))
def delete_many(self, _ids: List) -> None:
"""
Delete multiple records.
"""
self.fe.delete_many(_ids)
if self.mode == CacheMode.writethru:
self.be.delete_many(_ids)
elif self.mode == CacheMode.writeback:
self.executor.enqueue('delete_many', args=(_ids, ))
def delete_all(self) -> None:
raise NotImplementedError()
|
the-stack_106_16566
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import concurrent.futures
import csv
import datetime
import decimal
import json
import operator
import os
import time
import unittest
import uuid
import re
import six
import pytest
try:
from google.cloud import bigquery_storage_v1beta1
except ImportError: # pragma: NO COVER
bigquery_storage_v1beta1 = None
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
try:
import IPython
from IPython.utils import io
from IPython.testing import tools
from IPython.terminal import interactiveshell
except ImportError: # pragma: NO COVER
IPython = None
from google.api_core.exceptions import PreconditionFailed
from google.api_core.exceptions import BadRequest
from google.api_core.exceptions import Conflict
from google.api_core.exceptions import Forbidden
from google.api_core.exceptions import GoogleAPICallError
from google.api_core.exceptions import NotFound
from google.api_core.exceptions import InternalServerError
from google.api_core.exceptions import ServiceUnavailable
from google.api_core.exceptions import TooManyRequests
from google.cloud import bigquery
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.table import Table
from google.cloud._helpers import UTC
from google.cloud.bigquery import dbapi
from google.cloud import storage
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
JOB_TIMEOUT = 120 # 2 minutes
WHERE = os.path.abspath(os.path.dirname(__file__))
# Common table data used for many tests.
ROWS = [
("Phred Phlyntstone", 32),
("Bharney Rhubble", 33),
("Wylma Phlyntstone", 29),
("Bhettye Rhubble", 27),
]
HEADER_ROW = ("Full Name", "Age")
SCHEMA = [
bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
]
TIME_PARTITIONING_CLUSTERING_FIELDS_SCHEMA = [
bigquery.SchemaField("transaction_time", "TIMESTAMP", mode="REQUIRED"),
bigquery.SchemaField("transaction_id", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("user_email", "STRING", mode="REQUIRED"),
bigquery.SchemaField("store_code", "STRING", mode="REQUIRED"),
bigquery.SchemaField(
"items",
"RECORD",
mode="REPEATED",
fields=[
bigquery.SchemaField("item_code", "STRING", mode="REQUIRED"),
bigquery.SchemaField("quantity", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("comments", "STRING", mode="NULLABLE"),
bigquery.SchemaField("expiration_date", "DATE", mode="REQUIRED"),
],
),
]
retry_storage_errors = RetryErrors(
(TooManyRequests, InternalServerError, ServiceUnavailable)
)
def _has_rows(result):
return len(result) > 0
def _make_dataset_id(prefix):
return "%s%s" % (prefix, unique_resource_id())
def _load_json_schema(filename="data/schema.json"):
from google.cloud.bigquery.table import _parse_schema_resource
json_filename = os.path.join(WHERE, filename)
with open(json_filename, "r") as schema_file:
return _parse_schema_resource(json.load(schema_file))
def _rate_limit_exceeded(forbidden):
"""Predicate: pass only exceptions with 'rateLimitExceeded' as reason."""
return any(error["reason"] == "rateLimitExceeded" for error in forbidden._errors)
# We need to wait to stay within the rate limits.
# The alternative outcome is a 403 Forbidden response from upstream, which
# they return instead of the more appropriate 429.
# See https://cloud.google.com/bigquery/quota-policy
retry_403 = RetryErrors(Forbidden, error_predicate=_rate_limit_exceeded)
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
CURSOR = None
def setUpModule():
Config.CLIENT = bigquery.Client()
Config.CURSOR = dbapi.connect(Config.CLIENT).cursor()
class TestBigQuery(unittest.TestCase):
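    """System tests that exercise the BigQuery client against the live API.

    Each test registers the datasets, tables, buckets and blobs it creates in
    ``self.to_delete`` so tearDown() can remove them with retries.
    """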
def setUp(self):
self.to_delete = []
def tearDown(self):
def _still_in_use(bad_request):
return any(
error["reason"] == "resourceInUse" for error in bad_request._errors
)
retry_in_use = RetryErrors(BadRequest, error_predicate=_still_in_use)
retry_storage_errors_conflict = RetryErrors(
(Conflict, TooManyRequests, InternalServerError, ServiceUnavailable)
)
for doomed in self.to_delete:
if isinstance(doomed, storage.Bucket):
retry_storage_errors_conflict(doomed.delete)(force=True)
elif isinstance(doomed, (Dataset, bigquery.DatasetReference)):
retry_in_use(Config.CLIENT.delete_dataset)(doomed, delete_contents=True)
elif isinstance(doomed, (Table, bigquery.TableReference)):
retry_in_use(Config.CLIENT.delete_table)(doomed)
else:
doomed.delete()
def test_get_service_account_email(self):
client = Config.CLIENT
got = client.get_service_account_email()
self.assertIsInstance(got, six.text_type)
self.assertIn("@", got)
def _create_bucket(self, bucket_name, location=None):
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
retry_storage_errors(bucket.create)(location=location)
self.to_delete.append(bucket)
return bucket
def test_create_dataset(self):
DATASET_ID = _make_dataset_id("create_dataset")
dataset = self.temp_dataset(DATASET_ID)
self.assertTrue(_dataset_exists(dataset))
self.assertEqual(dataset.dataset_id, DATASET_ID)
self.assertEqual(dataset.project, Config.CLIENT.project)
def test_get_dataset(self):
dataset_id = _make_dataset_id("get_dataset")
client = Config.CLIENT
dataset_arg = Dataset(client.dataset(dataset_id))
dataset_arg.friendly_name = "Friendly"
dataset_arg.description = "Description"
dataset = retry_403(client.create_dataset)(dataset_arg)
self.to_delete.append(dataset)
dataset_ref = client.dataset(dataset_id)
# Get with a reference.
got = client.get_dataset(dataset_ref)
self.assertEqual(got.friendly_name, "Friendly")
self.assertEqual(got.description, "Description")
# Get with a string.
got = client.get_dataset(dataset_id)
self.assertEqual(got.friendly_name, "Friendly")
self.assertEqual(got.description, "Description")
# Get with a fully-qualified string.
got = client.get_dataset("{}.{}".format(client.project, dataset_id))
self.assertEqual(got.friendly_name, "Friendly")
self.assertEqual(got.description, "Description")
def test_update_dataset(self):
dataset = self.temp_dataset(_make_dataset_id("update_dataset"))
self.assertTrue(_dataset_exists(dataset))
self.assertIsNone(dataset.friendly_name)
self.assertIsNone(dataset.description)
self.assertEqual(dataset.labels, {})
dataset.friendly_name = "Friendly"
dataset.description = "Description"
dataset.labels = {"priority": "high", "color": "blue"}
ds2 = Config.CLIENT.update_dataset(
dataset, ("friendly_name", "description", "labels")
)
self.assertEqual(ds2.friendly_name, "Friendly")
self.assertEqual(ds2.description, "Description")
self.assertEqual(ds2.labels, {"priority": "high", "color": "blue"})
ds2.labels = {
"color": "green", # change
"shape": "circle", # add
"priority": None, # delete
}
ds3 = Config.CLIENT.update_dataset(ds2, ["labels"])
self.assertEqual(ds3.labels, {"color": "green", "shape": "circle"})
# If we try to update using d2 again, it will fail because the
# previous update changed the ETag.
ds2.description = "no good"
with self.assertRaises(PreconditionFailed):
Config.CLIENT.update_dataset(ds2, ["description"])
def test_list_datasets(self):
datasets_to_create = [
"new" + unique_resource_id(),
"newer" + unique_resource_id(),
"newest" + unique_resource_id(),
]
for dataset_id in datasets_to_create:
self.temp_dataset(dataset_id)
# Retrieve the datasets.
iterator = Config.CLIENT.list_datasets()
all_datasets = list(iterator)
self.assertIsNone(iterator.next_page_token)
created = [
dataset
for dataset in all_datasets
if dataset.dataset_id in datasets_to_create
and dataset.project == Config.CLIENT.project
]
self.assertEqual(len(created), len(datasets_to_create))
def test_list_datasets_w_project(self):
# Retrieve datasets from a different project.
iterator = Config.CLIENT.list_datasets(project="bigquery-public-data")
all_datasets = frozenset([dataset.dataset_id for dataset in iterator])
self.assertIn("usa_names", all_datasets)
def test_create_table(self):
dataset = self.temp_dataset(_make_dataset_id("create_table"))
table_id = "test_table"
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_id)
def test_create_table_w_time_partitioning_w_clustering_fields(self):
from google.cloud.bigquery.table import TimePartitioning
from google.cloud.bigquery.table import TimePartitioningType
dataset = self.temp_dataset(_make_dataset_id("create_table_tp_cf"))
table_id = "test_table"
table_arg = Table(
dataset.table(table_id), schema=TIME_PARTITIONING_CLUSTERING_FIELDS_SCHEMA
)
self.assertFalse(_table_exists(table_arg))
table_arg.time_partitioning = TimePartitioning(field="transaction_time")
table_arg.clustering_fields = ["user_email", "store_code"]
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_id)
time_partitioning = table.time_partitioning
self.assertEqual(time_partitioning.type_, TimePartitioningType.DAY)
self.assertEqual(time_partitioning.field, "transaction_time")
self.assertEqual(table.clustering_fields, ["user_email", "store_code"])
def test_delete_dataset_with_string(self):
dataset_id = _make_dataset_id("delete_table_true")
dataset_ref = Config.CLIENT.dataset(dataset_id)
retry_403(Config.CLIENT.create_dataset)(Dataset(dataset_ref))
self.assertTrue(_dataset_exists(dataset_ref))
Config.CLIENT.delete_dataset(dataset_id)
self.assertFalse(_dataset_exists(dataset_ref))
def test_delete_dataset_delete_contents_true(self):
dataset_id = _make_dataset_id("delete_table_true")
dataset = retry_403(Config.CLIENT.create_dataset)(
Dataset(Config.CLIENT.dataset(dataset_id))
)
table_id = "test_table"
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
Config.CLIENT.delete_dataset(dataset, delete_contents=True)
self.assertFalse(_table_exists(table))
def test_delete_dataset_delete_contents_false(self):
from google.api_core import exceptions
dataset = self.temp_dataset(_make_dataset_id("delete_table_false"))
table_id = "test_table"
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
retry_403(Config.CLIENT.create_table)(table_arg)
with self.assertRaises(exceptions.BadRequest):
Config.CLIENT.delete_dataset(dataset)
def test_get_table_w_public_dataset(self):
public = "bigquery-public-data"
dataset_id = "samples"
table_id = "shakespeare"
table_ref = DatasetReference(public, dataset_id).table(table_id)
# Get table with reference.
table = Config.CLIENT.get_table(table_ref)
self.assertEqual(table.table_id, table_id)
self.assertEqual(table.dataset_id, dataset_id)
self.assertEqual(table.project, public)
schema_names = [field.name for field in table.schema]
self.assertEqual(schema_names, ["word", "word_count", "corpus", "corpus_date"])
# Get table with string.
table = Config.CLIENT.get_table("{}.{}.{}".format(public, dataset_id, table_id))
self.assertEqual(table.table_id, table_id)
self.assertEqual(table.dataset_id, dataset_id)
self.assertEqual(table.project, public)
def test_list_partitions(self):
table_ref = DatasetReference(
"bigquery-public-data", "ethereum_blockchain"
).table("blocks")
all_rows = Config.CLIENT.list_partitions(table_ref)
self.assertIn("20180801", all_rows)
self.assertGreater(len(all_rows), 1000)
def test_list_tables(self):
dataset_id = _make_dataset_id("list_tables")
dataset = self.temp_dataset(dataset_id)
# Retrieve tables before any are created for the dataset.
iterator = Config.CLIENT.list_tables(dataset)
all_tables = list(iterator)
self.assertEqual(all_tables, [])
self.assertIsNone(iterator.next_page_token)
# Insert some tables to be listed.
tables_to_create = [
"new" + unique_resource_id(),
"newer" + unique_resource_id(),
"newest" + unique_resource_id(),
]
for table_name in tables_to_create:
table = Table(dataset.table(table_name), schema=SCHEMA)
created_table = retry_403(Config.CLIENT.create_table)(table)
self.to_delete.insert(0, created_table)
# Retrieve the tables.
iterator = Config.CLIENT.list_tables(dataset)
all_tables = list(iterator)
self.assertIsNone(iterator.next_page_token)
created = [
table
for table in all_tables
if (table.table_id in tables_to_create and table.dataset_id == dataset_id)
]
self.assertEqual(len(created), len(tables_to_create))
# List tables with a string ID.
iterator = Config.CLIENT.list_tables(dataset_id)
self.assertGreater(len(list(iterator)), 0)
# List tables with a fully-qualified string ID.
iterator = Config.CLIENT.list_tables(
"{}.{}".format(Config.CLIENT.project, dataset_id)
)
self.assertGreater(len(list(iterator)), 0)
def test_update_table(self):
dataset = self.temp_dataset(_make_dataset_id("update_table"))
TABLE_NAME = "test_table"
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertIsNone(table.friendly_name)
self.assertIsNone(table.description)
self.assertEqual(table.labels, {})
table.friendly_name = "Friendly"
table.description = "Description"
table.labels = {"priority": "high", "color": "blue"}
table2 = Config.CLIENT.update_table(
table, ["friendly_name", "description", "labels"]
)
self.assertEqual(table2.friendly_name, "Friendly")
self.assertEqual(table2.description, "Description")
self.assertEqual(table2.labels, {"priority": "high", "color": "blue"})
table2.description = None
table2.labels = {
"color": "green", # change
"shape": "circle", # add
"priority": None, # delete
}
table3 = Config.CLIENT.update_table(table2, ["description", "labels"])
self.assertIsNone(table3.description)
self.assertEqual(table3.labels, {"color": "green", "shape": "circle"})
# If we try to update using table2 again, it will fail because the
# previous update changed the ETag.
table2.description = "no good"
with self.assertRaises(PreconditionFailed):
Config.CLIENT.update_table(table2, ["description"])
def test_update_table_schema(self):
dataset = self.temp_dataset(_make_dataset_id("update_table"))
TABLE_NAME = "test_table"
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
voter = bigquery.SchemaField("voter", "BOOLEAN", mode="NULLABLE")
schema = table.schema
schema.append(voter)
table.schema = schema
updated_table = Config.CLIENT.update_table(table, ["schema"])
self.assertEqual(len(updated_table.schema), len(schema))
for found, expected in zip(updated_table.schema, schema):
self.assertEqual(found.name, expected.name)
self.assertEqual(found.field_type, expected.field_type)
self.assertEqual(found.mode, expected.mode)
@staticmethod
def _fetch_single_page(table, selected_fields=None):
iterator = Config.CLIENT.list_rows(table, selected_fields=selected_fields)
page = six.next(iterator.pages)
return list(page)
def _create_table_many_columns(self, rowcount):
# Generate a table of maximum width via CREATE TABLE AS SELECT.
        # The first column is named 'rowval' and has a value from 1..rowcount.
        # Subsequent columns are named col_<N> and contain the value N*rowval,
# where N is between 1 and 9999 inclusive.
dsname = _make_dataset_id("wide_schema")
dataset = self.temp_dataset(dsname)
table_id = "many_columns"
table_ref = dataset.table(table_id)
self.to_delete.insert(0, table_ref)
colprojections = ",".join(
["r * {} as col_{}".format(n, n) for n in range(1, 10000)]
)
sql = """
CREATE TABLE {}.{}
AS
SELECT
r as rowval,
{}
FROM
UNNEST(GENERATE_ARRAY(1,{},1)) as r
""".format(
dsname, table_id, colprojections, rowcount
)
query_job = Config.CLIENT.query(sql)
query_job.result()
self.assertEqual(query_job.statement_type, "CREATE_TABLE_AS_SELECT")
self.assertEqual(query_job.ddl_operation_performed, "CREATE")
self.assertEqual(query_job.ddl_target_table, table_ref)
return table_ref
def test_query_many_columns(self):
# Test working with the widest schema BigQuery supports, 10k columns.
row_count = 2
table_ref = self._create_table_many_columns(row_count)
rows = list(
Config.CLIENT.query(
"SELECT * FROM `{}.{}`".format(table_ref.dataset_id, table_ref.table_id)
)
)
self.assertEqual(len(rows), row_count)
# check field representations adhere to expected values.
correctwidth = 0
badvals = 0
for r in rows:
vals = r._xxx_values
rowval = vals[0]
if len(vals) == 10000:
correctwidth = correctwidth + 1
for n in range(1, 10000):
if vals[n] != rowval * (n):
badvals = badvals + 1
self.assertEqual(correctwidth, row_count)
self.assertEqual(badvals, 0)
def test_insert_rows_then_dump_table(self):
NOW_SECONDS = 1448911495.484366
NOW = datetime.datetime.utcfromtimestamp(NOW_SECONDS).replace(tzinfo=UTC)
ROWS = [
("Phred Phlyntstone", 32, NOW),
("Bharney Rhubble", 33, NOW + datetime.timedelta(seconds=10)),
("Wylma Phlyntstone", 29, NOW + datetime.timedelta(seconds=20)),
("Bhettye Rhubble", 27, None),
]
ROW_IDS = range(len(ROWS))
dataset = self.temp_dataset(_make_dataset_id("insert_rows_then_dump"))
TABLE_ID = "test_table"
schema = [
bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("now", "TIMESTAMP"),
]
table_arg = Table(dataset.table(TABLE_ID), schema=schema)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
errors = Config.CLIENT.insert_rows(table, ROWS, row_ids=ROW_IDS)
self.assertEqual(len(errors), 0)
rows = ()
# Allow for "warm up" before rows visible. See
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
# 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
by_age = operator.itemgetter(1)
self.assertEqual(sorted(row_tuples, key=by_age), sorted(ROWS, key=by_age))
def test_load_table_from_local_avro_file_then_dump_table(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
TABLE_NAME = "test_table_avro"
ROWS = [
("violet", 400),
("indigo", 445),
("blue", 475),
("green", 510),
("yellow", 570),
("orange", 590),
("red", 650),
]
dataset = self.temp_dataset(_make_dataset_id("load_local_then_dump"))
table_ref = dataset.table(TABLE_NAME)
table = Table(table_ref)
self.to_delete.insert(0, table)
with open(os.path.join(WHERE, "data", "colors.avro"), "rb") as avrof:
config = bigquery.LoadJobConfig()
config.source_format = SourceFormat.AVRO
config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job = Config.CLIENT.load_table_from_file(
avrof, table_ref, job_config=config
)
# Retry until done.
job.result(timeout=JOB_TIMEOUT)
self.assertEqual(job.output_rows, len(ROWS))
table = Config.CLIENT.get_table(table)
rows = self._fetch_single_page(table)
row_tuples = [r.values() for r in rows]
by_wavelength = operator.itemgetter(1)
self.assertEqual(
sorted(row_tuples, key=by_wavelength), sorted(ROWS, key=by_wavelength)
)
def test_load_avro_from_uri_then_dump_table(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
table_name = "test_table"
rows = [
("violet", 400),
("indigo", 445),
("blue", 475),
("green", 510),
("yellow", 570),
("orange", 590),
("red", 650),
]
with open(os.path.join(WHERE, "data", "colors.avro"), "rb") as f:
GS_URL = self._write_avro_to_storage(
"bq_load_test" + unique_resource_id(), "colors.avro", f
)
dataset = self.temp_dataset(_make_dataset_id("bq_load_test"))
table_arg = dataset.table(table_name)
table = retry_403(Config.CLIENT.create_table)(Table(table_arg))
self.to_delete.insert(0, table)
config = bigquery.LoadJobConfig()
config.create_disposition = CreateDisposition.CREATE_NEVER
config.source_format = SourceFormat.AVRO
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_uri(GS_URL, table_arg, job_config=config)
job.result(timeout=JOB_TIMEOUT)
self.assertEqual(job.output_rows, len(rows))
table = Config.CLIENT.get_table(table)
fetched = self._fetch_single_page(table)
row_tuples = [r.values() for r in fetched]
self.assertEqual(
sorted(row_tuples, key=lambda x: x[1]), sorted(rows, key=lambda x: x[1])
)
def test_load_table_from_uri_then_dump_table(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
TABLE_ID = "test_table"
GS_URL = self._write_csv_to_storage(
"bq_load_test" + unique_resource_id(), "person_ages.csv", HEADER_ROW, ROWS
)
dataset = self.temp_dataset(_make_dataset_id("load_gcs_then_dump"))
table_arg = Table(dataset.table(TABLE_ID), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
config = bigquery.LoadJobConfig()
config.create_disposition = CreateDisposition.CREATE_NEVER
config.skip_leading_rows = 1
config.source_format = SourceFormat.CSV
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_uri(
GS_URL, dataset.table(TABLE_ID), job_config=config
)
# Allow for 90 seconds of "warm up" before rows visible. See
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
# 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
rows = self._fetch_single_page(table)
row_tuples = [r.values() for r in rows]
by_age = operator.itemgetter(1)
self.assertEqual(sorted(row_tuples, key=by_age), sorted(ROWS, key=by_age))
def test_load_table_from_file_w_explicit_location(self):
# Create a temporary bucket for extract files.
bucket_name = "bq_load_table_eu_extract_test" + unique_resource_id()
self._create_bucket(bucket_name, location="eu")
# Create a temporary dataset & table in the EU.
table_bytes = six.BytesIO(b"a,3\nb,2\nc,1\n")
client = Config.CLIENT
dataset = self.temp_dataset(_make_dataset_id("eu_load_file"), location="EU")
table_ref = dataset.table("letters")
job_config = bigquery.LoadJobConfig()
job_config.skip_leading_rows = 0
job_config.schema = [
bigquery.SchemaField("letter", "STRING"),
bigquery.SchemaField("value", "INTEGER"),
]
# Load the file to an EU dataset with an EU load job.
load_job = client.load_table_from_file(
table_bytes, table_ref, location="EU", job_config=job_config
)
load_job.result()
job_id = load_job.job_id
# Can get the job from the EU.
load_job = client.get_job(job_id, location="EU")
self.assertEqual(job_id, load_job.job_id)
self.assertEqual("EU", load_job.location)
self.assertTrue(load_job.exists())
# Cannot get the job from the US.
with self.assertRaises(NotFound):
client.get_job(job_id, location="US")
load_job_us = client.get_job(job_id)
load_job_us._properties["jobReference"]["location"] = "US"
self.assertFalse(load_job_us.exists())
with self.assertRaises(NotFound):
load_job_us.reload()
# Can cancel the job from the EU.
self.assertTrue(load_job.cancel())
load_job = client.cancel_job(job_id, location="EU")
self.assertEqual(job_id, load_job.job_id)
self.assertEqual("EU", load_job.location)
# Cannot cancel the job from the US.
with self.assertRaises(NotFound):
client.cancel_job(job_id, location="US")
with self.assertRaises(NotFound):
load_job_us.cancel()
# Can list the table rows.
table = client.get_table(table_ref)
self.assertEqual(table.num_rows, 3)
rows = [(row.letter, row.value) for row in client.list_rows(table)]
self.assertEqual(list(sorted(rows)), [("a", 3), ("b", 2), ("c", 1)])
# Verify location behavior with queries
query_config = bigquery.QueryJobConfig()
query_config.dry_run = True
query_string = "SELECT * FROM `{}.letters` LIMIT 1".format(dataset.dataset_id)
eu_query = client.query(query_string, location="EU", job_config=query_config)
self.assertTrue(eu_query.done)
# Cannot query from US.
with self.assertRaises(GoogleAPICallError):
list(client.query(query_string, location="US", job_config=query_config))
# Cannot copy from US.
with self.assertRaises(GoogleAPICallError):
client.copy_table(
table_ref, dataset.table("letters2_us"), location="US"
).result()
# Cannot extract from US.
with self.assertRaises(GoogleAPICallError):
client.extract_table(
table_ref, "gs://{}/letters-us.csv".format(bucket_name), location="US"
).result()
def _write_csv_to_storage(self, bucket_name, blob_name, header_row, data_rows):
from google.cloud._testing import _NamedTemporaryFile
bucket = self._create_bucket(bucket_name)
blob = bucket.blob(blob_name)
with _NamedTemporaryFile() as temp:
with open(temp.name, "w") as csv_write:
writer = csv.writer(csv_write)
writer.writerow(header_row)
writer.writerows(data_rows)
with open(temp.name, "rb") as csv_read:
retry_storage_errors(blob.upload_from_file)(
csv_read, content_type="text/csv"
)
self.to_delete.insert(0, blob)
return "gs://{}/{}".format(bucket_name, blob_name)
def _write_avro_to_storage(self, bucket_name, blob_name, avro_file):
bucket = self._create_bucket(bucket_name)
blob = bucket.blob(blob_name)
retry_storage_errors(blob.upload_from_file)(
avro_file, content_type="application/x-avro-binary"
)
self.to_delete.insert(0, blob)
return "gs://{}/{}".format(bucket_name, blob_name)
def _load_table_for_extract_table(self, bucket, blob_name, table, rows):
from google.cloud._testing import _NamedTemporaryFile
blob = bucket.blob(blob_name)
with _NamedTemporaryFile() as temp:
with open(temp.name, "w") as csv_write:
writer = csv.writer(csv_write)
writer.writerow(HEADER_ROW)
writer.writerows(rows)
with open(temp.name, "rb") as csv_read:
retry_storage_errors(blob.upload_from_file)(
csv_read, content_type="text/csv"
)
self.to_delete.insert(0, blob)
dataset = self.temp_dataset(table.dataset_id)
table_ref = dataset.table(table.table_id)
config = bigquery.LoadJobConfig()
config.autodetect = True
gs_url = "gs://{}/{}".format(bucket.name, blob_name)
job = Config.CLIENT.load_table_from_uri(gs_url, table_ref, job_config=config)
# TODO(jba): do we need this retry now that we have job.result()?
# Allow for 90 seconds of "warm up" before rows visible. See
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
# 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
def test_extract_table(self):
local_id = unique_resource_id()
bucket_name = "bq_extract_test" + local_id
source_blob_name = "person_ages.csv"
dataset_id = _make_dataset_id("load_gcs_then_extract")
table_id = "test_table"
table_ref = Config.CLIENT.dataset(dataset_id).table(table_id)
table = Table(table_ref)
self.to_delete.insert(0, table)
bucket = self._create_bucket(bucket_name)
self._load_table_for_extract_table(bucket, source_blob_name, table_ref, ROWS)
destination_blob_name = "person_ages_out.csv"
destination = bucket.blob(destination_blob_name)
destination_uri = "gs://{}/person_ages_out.csv".format(bucket_name)
job = Config.CLIENT.extract_table(table_ref, destination_uri)
job.result(timeout=100)
self.to_delete.insert(0, destination)
got_bytes = retry_storage_errors(destination.download_as_string)()
got = got_bytes.decode("utf-8")
self.assertIn("Bharney Rhubble", got)
def test_copy_table(self):
# If we create a new table to copy from, the test won't work
# because the new rows will be stored in the streaming buffer,
# and copy jobs don't read the streaming buffer.
# We could wait for the streaming buffer to empty, but that could
# take minutes. Instead we copy a small public table.
source_dataset = DatasetReference("bigquery-public-data", "samples")
source_ref = source_dataset.table("shakespeare")
dest_dataset = self.temp_dataset(_make_dataset_id("copy_table"))
dest_ref = dest_dataset.table("destination_table")
job_config = bigquery.CopyJobConfig()
job = Config.CLIENT.copy_table(source_ref, dest_ref, job_config=job_config)
job.result()
dest_table = Config.CLIENT.get_table(dest_ref)
self.to_delete.insert(0, dest_table)
# Just check that we got some rows.
got_rows = self._fetch_single_page(dest_table)
self.assertTrue(len(got_rows) > 0)
def test_job_cancel(self):
DATASET_ID = _make_dataset_id("job_cancel")
JOB_ID_PREFIX = "fetch_" + DATASET_ID
TABLE_NAME = "test_table"
QUERY = "SELECT * FROM %s.%s" % (DATASET_ID, TABLE_NAME)
dataset = self.temp_dataset(DATASET_ID)
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
job = Config.CLIENT.query(QUERY, job_id_prefix=JOB_ID_PREFIX)
job.cancel()
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
# The `cancel` API doesn't leave any reliable traces on
# the status of the job resource, so we can't really assert for
        # them here. The best we can do is note that the API call didn't
# raise an error, and that the job completed (in the `retry()`
# above).
def test_get_failed_job(self):
# issue 4246
from google.api_core.exceptions import BadRequest
JOB_ID = "invalid_{}".format(str(uuid.uuid4()))
QUERY = "SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);"
PARAM = bigquery.ScalarQueryParameter("ts_value", "TIMESTAMP", 1.4810976e9)
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = [PARAM]
with self.assertRaises(BadRequest):
Config.CLIENT.query(QUERY, job_id=JOB_ID, job_config=job_config).result()
job = Config.CLIENT.get_job(JOB_ID)
with self.assertRaises(ValueError):
job.query_parameters
def test_query_w_legacy_sql_types(self):
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
stamp = "%s %s" % (naive.date().isoformat(), naive.time().isoformat())
zoned = naive.replace(tzinfo=UTC)
examples = [
{"sql": "SELECT 1", "expected": 1},
{"sql": "SELECT 1.3", "expected": 1.3},
{"sql": "SELECT TRUE", "expected": True},
{"sql": 'SELECT "ABC"', "expected": "ABC"},
{"sql": 'SELECT CAST("foo" AS BYTES)', "expected": b"foo"},
{"sql": 'SELECT CAST("%s" AS TIMESTAMP)' % (stamp,), "expected": zoned},
]
for example in examples:
job_config = bigquery.QueryJobConfig()
job_config.use_legacy_sql = True
rows = list(Config.CLIENT.query(example["sql"], job_config=job_config))
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example["expected"])
def _generate_standard_sql_types_examples(self):
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
naive_microseconds = datetime.datetime(2016, 12, 5, 12, 41, 9, 250000)
stamp = "%s %s" % (naive.date().isoformat(), naive.time().isoformat())
stamp_microseconds = stamp + ".250000"
zoned = naive.replace(tzinfo=UTC)
zoned_microseconds = naive_microseconds.replace(tzinfo=UTC)
numeric = decimal.Decimal("123456789.123456789")
return [
{"sql": "SELECT 1", "expected": 1},
{"sql": "SELECT 1.3", "expected": 1.3},
{"sql": "SELECT TRUE", "expected": True},
{"sql": 'SELECT "ABC"', "expected": "ABC"},
{"sql": 'SELECT CAST("foo" AS BYTES)', "expected": b"foo"},
{"sql": 'SELECT TIMESTAMP "%s"' % (stamp,), "expected": zoned},
{
"sql": 'SELECT TIMESTAMP "%s"' % (stamp_microseconds,),
"expected": zoned_microseconds,
},
{"sql": 'SELECT DATETIME(TIMESTAMP "%s")' % (stamp,), "expected": naive},
{
"sql": 'SELECT DATETIME(TIMESTAMP "%s")' % (stamp_microseconds,),
"expected": naive_microseconds,
},
{"sql": 'SELECT DATE(TIMESTAMP "%s")' % (stamp,), "expected": naive.date()},
{"sql": 'SELECT TIME(TIMESTAMP "%s")' % (stamp,), "expected": naive.time()},
{"sql": 'SELECT NUMERIC "%s"' % (numeric,), "expected": numeric},
{"sql": "SELECT (1, 2)", "expected": {"_field_1": 1, "_field_2": 2}},
{
"sql": "SELECT ((1, 2), (3, 4), 5)",
"expected": {
"_field_1": {"_field_1": 1, "_field_2": 2},
"_field_2": {"_field_1": 3, "_field_2": 4},
"_field_3": 5,
},
},
{"sql": "SELECT [1, 2, 3]", "expected": [1, 2, 3]},
{
"sql": "SELECT ([1, 2], 3, [4, 5])",
"expected": {"_field_1": [1, 2], "_field_2": 3, "_field_3": [4, 5]},
},
{
"sql": "SELECT [(1, 2, 3), (4, 5, 6)]",
"expected": [
{"_field_1": 1, "_field_2": 2, "_field_3": 3},
{"_field_1": 4, "_field_2": 5, "_field_3": 6},
],
},
{
"sql": "SELECT [([1, 2, 3], 4), ([5, 6], 7)]",
"expected": [
{u"_field_1": [1, 2, 3], u"_field_2": 4},
{u"_field_1": [5, 6], u"_field_2": 7},
],
},
{
"sql": "SELECT ARRAY(SELECT STRUCT([1, 2]))",
"expected": [{u"_field_1": [1, 2]}],
},
{"sql": "SELECT ST_GeogPoint(1, 2)", "expected": "POINT(1 2)"},
]
def test_query_w_standard_sql_types(self):
examples = self._generate_standard_sql_types_examples()
for example in examples:
rows = list(Config.CLIENT.query(example["sql"]))
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example["expected"])
def test_query_w_failed_query(self):
from google.api_core.exceptions import BadRequest
with self.assertRaises(BadRequest):
Config.CLIENT.query("invalid syntax;").result()
def test_query_w_wrong_config(self):
from google.cloud.bigquery.job import LoadJobConfig
good_query = "SELECT 1;"
rows = list(Config.CLIENT.query("SELECT 1;").result())
assert rows[0][0] == 1
bad_config = LoadJobConfig()
bad_config.destination = Config.CLIENT.dataset("dset").table("tbl")
with self.assertRaises(Exception):
Config.CLIENT.query(good_query, job_config=bad_config).result()
def test_query_w_timeout(self):
query_job = Config.CLIENT.query(
"SELECT * FROM `bigquery-public-data.github_repos.commits`;",
job_id_prefix="test_query_w_timeout_",
)
with self.assertRaises(concurrent.futures.TimeoutError):
# 1 second is much too short for this query.
query_job.result(timeout=1)
def test_query_statistics(self):
"""
A system test to exercise some of the extended query statistics.
Note: We construct a query that should need at least three stages by
specifying a JOIN query. Exact plan and stats are effectively
non-deterministic, so we're largely interested in confirming values
are present.
"""
job_config = bigquery.QueryJobConfig()
job_config.use_query_cache = False
query_job = Config.CLIENT.query(
"""
SELECT
COUNT(1)
FROM
(
SELECT
year,
wban_number
FROM `bigquery-public-data.samples.gsod`
LIMIT 1000
) lside
INNER JOIN
(
SELECT
year,
state
FROM `bigquery-public-data.samples.natality`
LIMIT 1000
) rside
ON
lside.year = rside.year
""",
location="US",
job_config=job_config,
)
# run the job to completion
query_job.result()
# Assert top-level stats
self.assertFalse(query_job.cache_hit)
self.assertIsNotNone(query_job.destination)
        self.assertTrue(query_job.done())
self.assertFalse(query_job.dry_run)
self.assertIsNone(query_job.num_dml_affected_rows)
self.assertEqual(query_job.priority, "INTERACTIVE")
self.assertGreater(query_job.total_bytes_billed, 1)
self.assertGreater(query_job.total_bytes_processed, 1)
self.assertEqual(query_job.statement_type, "SELECT")
self.assertGreater(query_job.slot_millis, 1)
# Make assertions on the shape of the query plan.
plan = query_job.query_plan
self.assertGreaterEqual(len(plan), 3)
first_stage = plan[0]
self.assertIsNotNone(first_stage.start)
self.assertIsNotNone(first_stage.end)
self.assertIsNotNone(first_stage.entry_id)
self.assertIsNotNone(first_stage.name)
self.assertGreater(first_stage.parallel_inputs, 0)
self.assertGreater(first_stage.completed_parallel_inputs, 0)
self.assertGreater(first_stage.shuffle_output_bytes, 0)
self.assertEqual(first_stage.status, "COMPLETE")
# Query plan is a digraph. Ensure it has inter-stage links,
# but not every stage has inputs.
stages_with_inputs = 0
for entry in plan:
if len(entry.input_stages) > 0:
stages_with_inputs = stages_with_inputs + 1
self.assertGreater(stages_with_inputs, 0)
self.assertGreater(len(plan), stages_with_inputs)
def test_dbapi_w_standard_sql_types(self):
examples = self._generate_standard_sql_types_examples()
for example in examples:
Config.CURSOR.execute(example["sql"])
self.assertEqual(Config.CURSOR.rowcount, 1)
row = Config.CURSOR.fetchone()
self.assertEqual(len(row), 1)
self.assertEqual(row[0], example["expected"])
row = Config.CURSOR.fetchone()
self.assertIsNone(row)
def test_dbapi_fetchall(self):
query = "SELECT * FROM UNNEST([(1, 2), (3, 4), (5, 6)])"
for arraysize in range(1, 5):
Config.CURSOR.execute(query)
self.assertEqual(Config.CURSOR.rowcount, 3, "expected 3 rows")
Config.CURSOR.arraysize = arraysize
rows = Config.CURSOR.fetchall()
row_tuples = [r.values() for r in rows]
self.assertEqual(row_tuples, [(1, 2), (3, 4), (5, 6)])
def _load_table_for_dml(self, rows, dataset_id, table_id):
from google.cloud._testing import _NamedTemporaryFile
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
dataset = self.temp_dataset(dataset_id)
greeting = bigquery.SchemaField("greeting", "STRING", mode="NULLABLE")
table_ref = dataset.table(table_id)
table_arg = Table(table_ref, schema=[greeting])
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
with _NamedTemporaryFile() as temp:
with open(temp.name, "w") as csv_write:
writer = csv.writer(csv_write)
writer.writerow(("Greeting",))
writer.writerows(rows)
with open(temp.name, "rb") as csv_read:
config = bigquery.LoadJobConfig()
config.source_format = SourceFormat.CSV
config.skip_leading_rows = 1
config.create_disposition = CreateDisposition.CREATE_NEVER
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_file(
csv_read, table_ref, job_config=config
)
# Retry until done.
job.result(timeout=JOB_TIMEOUT)
self._fetch_single_page(table)
def test_query_w_dml(self):
dataset_name = _make_dataset_id("dml_query")
table_name = "test_table"
self._load_table_for_dml([("Hello World",)], dataset_name, table_name)
query_template = """UPDATE {}.{}
SET greeting = 'Guten Tag'
WHERE greeting = 'Hello World'
"""
query_job = Config.CLIENT.query(
query_template.format(dataset_name, table_name),
job_id_prefix="test_query_w_dml_",
)
query_job.result()
self.assertEqual(query_job.num_dml_affected_rows, 1)
def test_dbapi_w_dml(self):
dataset_name = _make_dataset_id("dml_dbapi")
table_name = "test_table"
self._load_table_for_dml([("Hello World",)], dataset_name, table_name)
query_template = """UPDATE {}.{}
SET greeting = 'Guten Tag'
WHERE greeting = 'Hello World'
"""
Config.CURSOR.execute(
query_template.format(dataset_name, table_name),
job_id="test_dbapi_w_dml_{}".format(str(uuid.uuid4())),
)
self.assertEqual(Config.CURSOR.rowcount, 1)
self.assertIsNone(Config.CURSOR.fetchone())
def test_query_w_query_params(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import ArrayQueryParameter
from google.cloud.bigquery.query import ScalarQueryParameter
from google.cloud.bigquery.query import StructQueryParameter
question = "What is the answer to life, the universe, and everything?"
question_param = ScalarQueryParameter(
name="question", type_="STRING", value=question
)
answer = 42
answer_param = ScalarQueryParameter(name="answer", type_="INT64", value=answer)
pi = 3.1415926
pi_param = ScalarQueryParameter(name="pi", type_="FLOAT64", value=pi)
pi_numeric = decimal.Decimal("3.141592654")
pi_numeric_param = ScalarQueryParameter(
name="pi_numeric_param", type_="NUMERIC", value=pi_numeric
)
truthy = True
truthy_param = ScalarQueryParameter(name="truthy", type_="BOOL", value=truthy)
beef = b"DEADBEEF"
beef_param = ScalarQueryParameter(name="beef", type_="BYTES", value=beef)
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
naive_param = ScalarQueryParameter(name="naive", type_="DATETIME", value=naive)
naive_date_param = ScalarQueryParameter(
name="naive_date", type_="DATE", value=naive.date()
)
naive_time_param = ScalarQueryParameter(
name="naive_time", type_="TIME", value=naive.time()
)
zoned = naive.replace(tzinfo=UTC)
zoned_param = ScalarQueryParameter(name="zoned", type_="TIMESTAMP", value=zoned)
array_param = ArrayQueryParameter(
name="array_param", array_type="INT64", values=[1, 2]
)
struct_param = StructQueryParameter("hitchhiker", question_param, answer_param)
phred_name = "Phred Phlyntstone"
phred_name_param = ScalarQueryParameter(
name="name", type_="STRING", value=phred_name
)
phred_age = 32
phred_age_param = ScalarQueryParameter(
name="age", type_="INT64", value=phred_age
)
phred_param = StructQueryParameter(None, phred_name_param, phred_age_param)
bharney_name = "Bharney Rhubbyl"
bharney_name_param = ScalarQueryParameter(
name="name", type_="STRING", value=bharney_name
)
bharney_age = 31
bharney_age_param = ScalarQueryParameter(
name="age", type_="INT64", value=bharney_age
)
bharney_param = StructQueryParameter(
None, bharney_name_param, bharney_age_param
)
characters_param = ArrayQueryParameter(
name=None, array_type="RECORD", values=[phred_param, bharney_param]
)
hero_param = StructQueryParameter("hero", phred_name_param, phred_age_param)
sidekick_param = StructQueryParameter(
"sidekick", bharney_name_param, bharney_age_param
)
roles_param = StructQueryParameter("roles", hero_param, sidekick_param)
friends_param = ArrayQueryParameter(
name="friends", array_type="STRING", values=[phred_name, bharney_name]
)
with_friends_param = StructQueryParameter(None, friends_param)
top_left_param = StructQueryParameter(
"top_left",
ScalarQueryParameter("x", "INT64", 12),
ScalarQueryParameter("y", "INT64", 102),
)
bottom_right_param = StructQueryParameter(
"bottom_right",
ScalarQueryParameter("x", "INT64", 22),
ScalarQueryParameter("y", "INT64", 92),
)
rectangle_param = StructQueryParameter(
"rectangle", top_left_param, bottom_right_param
)
examples = [
{
"sql": "SELECT @question",
"expected": question,
"query_parameters": [question_param],
},
{
"sql": "SELECT @answer",
"expected": answer,
"query_parameters": [answer_param],
},
{"sql": "SELECT @pi", "expected": pi, "query_parameters": [pi_param]},
{
"sql": "SELECT @pi_numeric_param",
"expected": pi_numeric,
"query_parameters": [pi_numeric_param],
},
{
"sql": "SELECT @truthy",
"expected": truthy,
"query_parameters": [truthy_param],
},
{"sql": "SELECT @beef", "expected": beef, "query_parameters": [beef_param]},
{
"sql": "SELECT @naive",
"expected": naive,
"query_parameters": [naive_param],
},
{
"sql": "SELECT @naive_date",
"expected": naive.date(),
"query_parameters": [naive_date_param],
},
{
"sql": "SELECT @naive_time",
"expected": naive.time(),
"query_parameters": [naive_time_param],
},
{
"sql": "SELECT @zoned",
"expected": zoned,
"query_parameters": [zoned_param],
},
{
"sql": "SELECT @array_param",
"expected": [1, 2],
"query_parameters": [array_param],
},
{
"sql": "SELECT (@hitchhiker.question, @hitchhiker.answer)",
"expected": ({"_field_1": question, "_field_2": answer}),
"query_parameters": [struct_param],
},
{
"sql": "SELECT "
"((@rectangle.bottom_right.x - @rectangle.top_left.x) "
"* (@rectangle.top_left.y - @rectangle.bottom_right.y))",
"expected": 100,
"query_parameters": [rectangle_param],
},
{
"sql": "SELECT ?",
"expected": [
{"name": phred_name, "age": phred_age},
{"name": bharney_name, "age": bharney_age},
],
"query_parameters": [characters_param],
},
{
"sql": "SELECT @roles",
"expected": {
"hero": {"name": phred_name, "age": phred_age},
"sidekick": {"name": bharney_name, "age": bharney_age},
},
"query_parameters": [roles_param],
},
{
"sql": "SELECT ?",
"expected": {"friends": [phred_name, bharney_name]},
"query_parameters": [with_friends_param],
},
]
for example in examples:
jconfig = QueryJobConfig()
jconfig.query_parameters = example["query_parameters"]
query_job = Config.CLIENT.query(
example["sql"],
job_config=jconfig,
job_id_prefix="test_query_w_query_params",
)
rows = list(query_job.result())
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example["expected"])
def test_dbapi_w_query_parameters(self):
examples = [
{
"sql": "SELECT %(boolval)s",
"expected": True,
"query_parameters": {"boolval": True},
},
{
"sql": 'SELECT %(a "very" weird `name`)s',
"expected": True,
"query_parameters": {'a "very" weird `name`': True},
},
{
"sql": "SELECT %(select)s",
"expected": True,
"query_parameters": {"select": True}, # this name is a keyword
},
{"sql": "SELECT %s", "expected": False, "query_parameters": [False]},
{
"sql": "SELECT %(intval)s",
"expected": 123,
"query_parameters": {"intval": 123},
},
{
"sql": "SELECT %s",
"expected": -123456789,
"query_parameters": [-123456789],
},
{
"sql": "SELECT %(floatval)s",
"expected": 1.25,
"query_parameters": {"floatval": 1.25},
},
{
"sql": "SELECT LOWER(%(strval)s)",
"query_parameters": {"strval": "I Am A String"},
"expected": "i am a string",
},
{
"sql": "SELECT DATE_SUB(%(dateval)s, INTERVAL 1 DAY)",
"query_parameters": {"dateval": datetime.date(2017, 4, 2)},
"expected": datetime.date(2017, 4, 1),
},
{
"sql": "SELECT TIME_ADD(%(timeval)s, INTERVAL 4 SECOND)",
"query_parameters": {"timeval": datetime.time(12, 34, 56)},
"expected": datetime.time(12, 35, 0),
},
{
"sql": ("SELECT DATETIME_ADD(%(datetimeval)s, INTERVAL 53 SECOND)"),
"query_parameters": {
"datetimeval": datetime.datetime(2012, 3, 4, 5, 6, 7)
},
"expected": datetime.datetime(2012, 3, 4, 5, 7, 0),
},
{
"sql": "SELECT TIMESTAMP_TRUNC(%(zoned)s, MINUTE)",
"query_parameters": {
"zoned": datetime.datetime(2012, 3, 4, 5, 6, 7, tzinfo=UTC)
},
"expected": datetime.datetime(2012, 3, 4, 5, 6, 0, tzinfo=UTC),
},
{
"sql": "SELECT TIMESTAMP_TRUNC(%(zoned)s, MINUTE)",
"query_parameters": {
"zoned": datetime.datetime(2012, 3, 4, 5, 6, 7, 250000, tzinfo=UTC)
},
"expected": datetime.datetime(2012, 3, 4, 5, 6, 0, tzinfo=UTC),
},
]
for example in examples:
msg = "sql: {} query_parameters: {}".format(
example["sql"], example["query_parameters"]
)
Config.CURSOR.execute(example["sql"], example["query_parameters"])
self.assertEqual(Config.CURSOR.rowcount, 1, msg=msg)
row = Config.CURSOR.fetchone()
self.assertEqual(len(row), 1, msg=msg)
self.assertEqual(row[0], example["expected"], msg=msg)
row = Config.CURSOR.fetchone()
self.assertIsNone(row, msg=msg)
def test_large_query_w_public_data(self):
PUBLIC = "bigquery-public-data"
DATASET_ID = "samples"
TABLE_NAME = "natality"
LIMIT = 1000
SQL = "SELECT * from `{}.{}.{}` LIMIT {}".format(
PUBLIC, DATASET_ID, TABLE_NAME, LIMIT
)
query_job = Config.CLIENT.query(SQL)
rows = list(query_job)
self.assertEqual(len(rows), LIMIT)
def test_query_future(self):
query_job = Config.CLIENT.query("SELECT 1")
iterator = query_job.result(timeout=JOB_TIMEOUT)
row_tuples = [r.values() for r in iterator]
self.assertEqual(row_tuples, [(1,)])
def test_query_iter(self):
import types
query_job = Config.CLIENT.query("SELECT 1")
self.assertIsInstance(iter(query_job), types.GeneratorType)
row_tuples = [r.values() for r in query_job]
self.assertEqual(row_tuples, [(1,)])
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_query_results_to_dataframe(self):
QUERY = """
SELECT id, author, time_ts, dead
FROM `bigquery-public-data.hacker_news.comments`
LIMIT 10
"""
df = Config.CLIENT.query(QUERY).result().to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 10) # verify the number of rows
column_names = ["id", "author", "time_ts", "dead"]
self.assertEqual(list(df), column_names) # verify the column names
exp_datatypes = {
"id": int,
"author": six.text_type,
"time_ts": pandas.Timestamp,
"dead": bool,
}
for index, row in df.iterrows():
for col in column_names:
# all the schema fields are nullable, so None is acceptable
                if row[col] is not None:
self.assertIsInstance(row[col], exp_datatypes[col])
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_query_results_to_dataframe_w_bqstorage(self):
dest_dataset = self.temp_dataset(_make_dataset_id("bqstorage_to_dataframe_"))
dest_ref = dest_dataset.table("query_results")
query = """
SELECT id, author, time_ts, dead
FROM `bigquery-public-data.hacker_news.comments`
LIMIT 10
"""
bqstorage_client = bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=Config.CLIENT._credentials
)
df = (
Config.CLIENT.query(
query,
# There is a known issue reading small anonymous query result
# tables with the BQ Storage API. Writing to a destination
# table works around this issue.
job_config=bigquery.QueryJobConfig(
destination=dest_ref, write_disposition="WRITE_TRUNCATE"
),
)
.result()
.to_dataframe(bqstorage_client)
)
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 10) # verify the number of rows
column_names = ["id", "author", "time_ts", "dead"]
self.assertEqual(list(df), column_names)
exp_datatypes = {
"id": int,
"author": six.text_type,
"time_ts": pandas.Timestamp,
"dead": bool,
}
for index, row in df.iterrows():
for col in column_names:
# all the schema fields are nullable, so None is acceptable
                if row[col] is not None:
self.assertIsInstance(row[col], exp_datatypes[col])
def test_insert_rows_nested_nested(self):
# See #2951
SF = bigquery.SchemaField
schema = [
SF("string_col", "STRING", mode="NULLABLE"),
SF(
"record_col",
"RECORD",
mode="NULLABLE",
fields=[
SF("nested_string", "STRING", mode="NULLABLE"),
SF("nested_repeated", "INTEGER", mode="REPEATED"),
SF(
"nested_record",
"RECORD",
mode="NULLABLE",
fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
),
],
),
]
record = {
"nested_string": "another string value",
"nested_repeated": [0, 1, 2],
"nested_record": {"nested_nested_string": "some deep insight"},
}
to_insert = [("Some value", record)]
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("issue_2951"))
table_arg = Table(dataset.table(table_id), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
Config.CLIENT.insert_rows(table, to_insert)
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
self.assertEqual(row_tuples, to_insert)
def test_insert_rows_nested_nested_dictionary(self):
# See #2951
SF = bigquery.SchemaField
schema = [
SF("string_col", "STRING", mode="NULLABLE"),
SF(
"record_col",
"RECORD",
mode="NULLABLE",
fields=[
SF("nested_string", "STRING", mode="NULLABLE"),
SF("nested_repeated", "INTEGER", mode="REPEATED"),
SF(
"nested_record",
"RECORD",
mode="NULLABLE",
fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
),
],
),
]
record = {
"nested_string": "another string value",
"nested_repeated": [0, 1, 2],
"nested_record": {"nested_nested_string": "some deep insight"},
}
to_insert = [{"string_col": "Some value", "record_col": record}]
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("issue_2951"))
table_arg = Table(dataset.table(table_id), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
Config.CLIENT.insert_rows(table, to_insert)
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
expected_rows = [("Some value", record)]
self.assertEqual(row_tuples, expected_rows)
def test_create_table_rows_fetch_nested_schema(self):
table_name = "test_table"
dataset = self.temp_dataset(_make_dataset_id("create_table_nested_schema"))
schema = _load_json_schema()
table_arg = Table(dataset.table(table_name), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_name)
to_insert = []
# Data is in "JSON Lines" format, see http://jsonlines.org/
json_filename = os.path.join(WHERE, "data", "characters.jsonl")
with open(json_filename) as rows_file:
for line in rows_file:
to_insert.append(json.loads(line))
errors = Config.CLIENT.insert_rows_json(table, to_insert)
self.assertEqual(len(errors), 0)
retry = RetryResult(_has_rows, max_tries=8)
fetched = retry(self._fetch_single_page)(table)
fetched_tuples = [f.values() for f in fetched]
self.assertEqual(len(fetched), len(to_insert))
for found, expected in zip(sorted(fetched_tuples), to_insert):
self.assertEqual(found[0], expected["Name"])
self.assertEqual(found[1], int(expected["Age"]))
self.assertEqual(found[2], expected["Weight"])
self.assertEqual(found[3], expected["IsMagic"])
self.assertEqual(len(found[4]), len(expected["Spells"]))
for f_spell, e_spell in zip(found[4], expected["Spells"]):
self.assertEqual(f_spell["Name"], e_spell["Name"])
parts = time.strptime(e_spell["LastUsed"], "%Y-%m-%d %H:%M:%S UTC")
e_used = datetime.datetime(*parts[0:6], tzinfo=UTC)
self.assertEqual(f_spell["LastUsed"], e_used)
self.assertEqual(f_spell["DiscoveredBy"], e_spell["DiscoveredBy"])
self.assertEqual(f_spell["Properties"], e_spell["Properties"])
e_icon = base64.standard_b64decode(e_spell["Icon"].encode("ascii"))
self.assertEqual(f_spell["Icon"], e_icon)
parts = time.strptime(expected["TeaTime"], "%H:%M:%S")
e_teatime = datetime.time(*parts[3:6])
self.assertEqual(found[5], e_teatime)
parts = time.strptime(expected["NextVacation"], "%Y-%m-%d")
e_nextvac = datetime.date(*parts[0:3])
self.assertEqual(found[6], e_nextvac)
parts = time.strptime(expected["FavoriteTime"], "%Y-%m-%dT%H:%M:%S")
e_favtime = datetime.datetime(*parts[0:6])
self.assertEqual(found[7], e_favtime)
self.assertEqual(found[8], decimal.Decimal(expected["FavoriteNumber"]))
def _fetch_dataframe(self, query):
return Config.CLIENT.query(query).result().to_dataframe()
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_nested_table_to_dataframe(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
SF = bigquery.SchemaField
schema = [
SF("string_col", "STRING", mode="NULLABLE"),
SF(
"record_col",
"RECORD",
mode="NULLABLE",
fields=[
SF("nested_string", "STRING", mode="NULLABLE"),
SF("nested_repeated", "INTEGER", mode="REPEATED"),
SF(
"nested_record",
"RECORD",
mode="NULLABLE",
fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
),
],
),
SF("bigfloat_col", "FLOAT", mode="NULLABLE"),
SF("smallfloat_col", "FLOAT", mode="NULLABLE"),
]
record = {
"nested_string": "another string value",
"nested_repeated": [0, 1, 2],
"nested_record": {"nested_nested_string": "some deep insight"},
}
to_insert = [
{
"string_col": "Some value",
"record_col": record,
"bigfloat_col": 3.14,
"smallfloat_col": 2.72,
}
]
rows = [json.dumps(row) for row in to_insert]
body = six.BytesIO("{}\n".format("\n".join(rows)).encode("ascii"))
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("nested_df"))
table = dataset.table(table_id)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
job_config.schema = schema
# Load a table using a local JSON file from memory.
Config.CLIENT.load_table_from_file(body, table, job_config=job_config).result()
df = Config.CLIENT.list_rows(table, selected_fields=schema).to_dataframe(
dtypes={"smallfloat_col": "float16"}
)
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 1) # verify the number of rows
exp_columns = ["string_col", "record_col", "bigfloat_col", "smallfloat_col"]
self.assertEqual(list(df), exp_columns) # verify the column names
row = df.iloc[0]
# verify the row content
self.assertEqual(row["string_col"], "Some value")
self.assertEqual(row["record_col"], record)
# verify that nested data can be accessed with indices/keys
self.assertEqual(row["record_col"]["nested_repeated"][0], 0)
self.assertEqual(
row["record_col"]["nested_record"]["nested_nested_string"],
"some deep insight",
)
# verify dtypes
self.assertEqual(df.dtypes["bigfloat_col"].name, "float64")
self.assertEqual(df.dtypes["smallfloat_col"].name, "float16")
def test_list_rows_empty_table(self):
from google.cloud.bigquery.table import RowIterator
dataset_id = _make_dataset_id("empty_table")
dataset = self.temp_dataset(dataset_id)
table_ref = dataset.table("empty_table")
table = Config.CLIENT.create_table(bigquery.Table(table_ref))
# It's a bit silly to list rows for an empty table, but this does
# happen as the result of a DDL query from an IPython magic command.
rows = Config.CLIENT.list_rows(table)
self.assertIsInstance(rows, RowIterator)
self.assertEqual(tuple(rows), ())
def test_list_rows_page_size(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
num_items = 7
page_size = 3
num_pages, num_last_page = divmod(num_items, page_size)
SF = bigquery.SchemaField
schema = [SF("string_col", "STRING", mode="NULLABLE")]
to_insert = [{"string_col": "item%d" % i} for i in range(num_items)]
rows = [json.dumps(row) for row in to_insert]
body = six.BytesIO("{}\n".format("\n".join(rows)).encode("ascii"))
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("nested_df"))
table = dataset.table(table_id)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
job_config.schema = schema
# Load a table using a local JSON file from memory.
Config.CLIENT.load_table_from_file(body, table, job_config=job_config).result()
df = Config.CLIENT.list_rows(table, selected_fields=schema, page_size=page_size)
pages = df.pages
for i in range(num_pages):
page = next(pages)
self.assertEqual(page.num_items, page_size)
page = next(pages)
self.assertEqual(page.num_items, num_last_page)
def temp_dataset(self, dataset_id, location=None):
dataset = Dataset(Config.CLIENT.dataset(dataset_id))
if location:
dataset.location = location
dataset = retry_403(Config.CLIENT.create_dataset)(dataset)
self.to_delete.append(dataset)
return dataset
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
@pytest.mark.skipif(IPython is None, reason="Requires `ipython`")
@pytest.mark.usefixtures("ipython_interactive")
def test_bigquery_magic():
ip = IPython.get_ipython()
ip.extension_manager.load_extension("google.cloud.bigquery")
sql = """
SELECT
CONCAT(
'https://stackoverflow.com/questions/',
CAST(id as STRING)) as url,
view_count
FROM `bigquery-public-data.stackoverflow.posts_questions`
WHERE tags like '%google-bigquery%'
ORDER BY view_count DESC
LIMIT 10
"""
with io.capture_output() as captured:
result = ip.run_cell_magic("bigquery", "", sql)
lines = re.split("\n|\r", captured.stdout)
# Removes blanks & terminal code (result of display clearing)
updates = list(filter(lambda x: bool(x) and x != "\x1b[2K", lines))
assert re.match("Executing query with job ID: .*", updates[0])
assert all(re.match("Query executing: .*s", line) for line in updates[1:-1])
assert re.match("Query complete after .*s", updates[-1])
assert isinstance(result, pandas.DataFrame)
assert len(result) == 10 # verify row count
assert list(result) == ["url", "view_count"] # verify column names
def _job_done(instance):
return instance.state.lower() == "done"
def _dataset_exists(ds):
try:
Config.CLIENT.get_dataset(DatasetReference(ds.project, ds.dataset_id))
return True
except NotFound:
return False
def _table_exists(t):
try:
tr = DatasetReference(t.project, t.dataset_id).table(t.table_id)
Config.CLIENT.get_table(tr)
return True
except NotFound:
return False
@pytest.fixture(scope="session")
def ipython():
config = tools.default_config()
config.TerminalInteractiveShell.simple_prompt = True
shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
return shell
@pytest.fixture()
def ipython_interactive(request, ipython):
"""Activate IPython's builtin hooks
for the duration of the test scope.
"""
with ipython.builtin_trap:
yield ipython
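# A minimal sketch (not part of the original test suite) approximating the
# retry-until-done pattern used above, i.e.
# ``RetryInstanceState(_job_done, max_tries=8)(job.reload)()``: reload the
# job with exponential backoff until ``_job_done`` reports a terminal state.
# This only mimics the test_utils helper; it is not its actual implementation.
def _wait_until_done(job, max_tries=8, base_delay=1.0):
    """Poll ``job.reload()`` until ``_job_done(job)`` returns True."""
    for attempt in range(max_tries):
        job.reload()
        if _job_done(job):
            return job
        if attempt < max_tries - 1:
            # 1, 2, 4, ... seconds between tries (127 s total for 8 tries).
            time.sleep(base_delay * (2 ** attempt))
    raise RuntimeError("job did not reach DONE state after %d tries" % max_tries)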
|
the-stack_106_16570
|
from django.utils.translation import gettext_lazy as _
from rest_framework.permissions import BasePermission
from rest_framework.permissions import (
DjangoModelPermissions as BaseDjangoModelPermissions,
)
from swapper import load_model
Organization = load_model('openwisp_users', 'Organization')
class BaseOrganizationPermission(BasePermission):
def has_object_permission(self, request, view, obj):
organization = self.get_object_organization(view, obj)
return self.validate_membership(request.user, organization)
def has_permission(self, request, view):
return request.user and request.user.is_authenticated
def get_object_organization(self, view, obj):
organization_field = getattr(view, 'organization_field', 'organization')
fields = organization_field.split('__')
accessed_object = obj
for field in fields:
accessed_object = getattr(accessed_object, field, None)
if not accessed_object:
raise AttributeError(
_(
'Organization not found, `organization_field` '
'not implemented correctly.'
)
)
return accessed_object
def validate_membership(self, user, org):
raise NotImplementedError(
_(
                'View\'s permission_classes not implemented correctly. '
'Please use one of the child classes: IsOrganizationMember, '
'IsOrganizationManager or IsOrganizationOwner.'
)
)
class IsOrganizationMember(BaseOrganizationPermission):
message = _(
'User is not a member of the organization to which the '
'requested resource belongs.'
)
def validate_membership(self, user, org):
return org and (user.is_superuser or user.is_member(org))
class IsOrganizationManager(BaseOrganizationPermission):
message = _(
'User is not a manager of the organization to which the '
'requested resource belongs.'
)
def validate_membership(self, user, org):
return org and (user.is_superuser or user.is_manager(org))
class IsOrganizationOwner(BaseOrganizationPermission):
message = _(
        'User is not an owner of the organization to which the '
'requested resource belongs.'
)
def validate_membership(self, user, org):
return org and (user.is_superuser or user.is_owner(org))
class DjangoModelPermissions(BaseDjangoModelPermissions):
perms_map = {
'GET': ['%(app_label)s.view_%(model_name)s'],
'OPTIONS': [],
'HEAD': ['%(app_label)s.view_%(model_name)s'],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
def has_permission(self, request, view):
# Workaround to ensure DjangoModelPermissions are not applied
# to the root view when using DefaultRouter.
if getattr(view, '_ignore_model_permissions', False):
return True
user = request.user
if not user or (not user.is_authenticated and self.authenticated_users_only):
return False
queryset = self._queryset(view)
perms = self.get_required_permissions(request.method, queryset.model)
change_perm = self.get_required_permissions('PUT', queryset.model)
if request.method == 'GET':
return user.has_perms(perms) or user.has_perms(change_perm)
return user.has_perms(perms)
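# Hedged usage sketch (not part of the original module): a DRF view might
# combine the classes above as shown below. ``Device`` and ``DeviceSerializer``
# are hypothetical names used only for illustration; ``organization_field``
# may also traverse relations with '__', e.g. 'device__organization'.
#
#   class DeviceDetailView(RetrieveAPIView):
#       queryset = Device.objects.all()
#       serializer_class = DeviceSerializer
#       permission_classes = [IsOrganizationMember, DjangoModelPermissions]
#       organization_field = 'organization'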
|
the-stack_106_16571
|
import numbers
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, Union
import warnings
import numpy as np
from pandas._libs import lib, missing as libmissing
from pandas._typing import ArrayLike, DtypeObj
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.base import register_extension_dtype
from pandas.core.dtypes.common import (
is_bool_dtype,
is_datetime64_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from pandas.core.array_algos import masked_reductions
from pandas.core.ops import invalid_comparison
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.tools.numeric import to_numeric
from .masked import BaseMaskedArray, BaseMaskedDtype
if TYPE_CHECKING:
import pyarrow
class _IntegerDtype(BaseMaskedDtype):
"""
An ExtensionDtype to hold a single size & kind of integer dtype.
These specific implementations are subclasses of the non-public
_IntegerDtype. For example we have Int8Dtype to represent signed int 8s.
The attributes name & type are set when these subclasses are created.
"""
def __repr__(self) -> str:
sign = "U" if self.is_unsigned_integer else ""
return f"{sign}Int{8 * self.itemsize}Dtype()"
@cache_readonly
def is_signed_integer(self) -> bool:
return self.kind == "i"
@cache_readonly
def is_unsigned_integer(self) -> bool:
return self.kind == "u"
@property
def _is_numeric(self) -> bool:
return True
@classmethod
def construct_array_type(cls) -> Type["IntegerArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return IntegerArray
def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
# we only handle nullable EA dtypes and numeric numpy dtypes
if not all(
isinstance(t, BaseMaskedDtype)
or (
isinstance(t, np.dtype)
and (np.issubdtype(t, np.number) or np.issubdtype(t, np.bool_))
)
for t in dtypes
):
return None
np_dtype = np.find_common_type(
[t.numpy_dtype if isinstance(t, BaseMaskedDtype) else t for t in dtypes], []
)
if np.issubdtype(np_dtype, np.integer):
return INT_STR_TO_DTYPE[str(np_dtype)]
elif np.issubdtype(np_dtype, np.floating):
from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE
return FLOAT_STR_TO_DTYPE[str(np_dtype)]
return None
def __from_arrow__(
self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
) -> "IntegerArray":
"""
Construct IntegerArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
pyarrow_type = pyarrow.from_numpy_dtype(self.type)
if not array.type.equals(pyarrow_type):
array = array.cast(pyarrow_type)
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=self.type)
int_arr = IntegerArray(data.copy(), ~mask, copy=False)
results.append(int_arr)
return IntegerArray._concat_same_type(results)
def integer_array(values, dtype=None, copy: bool = False) -> "IntegerArray":
"""
Infer and return an integer array of the values.
Parameters
----------
values : 1D list-like
dtype : dtype, optional
dtype to coerce
copy : bool, default False
Returns
-------
IntegerArray
Raises
------
TypeError if incompatible types
"""
values, mask = coerce_to_array(values, dtype=dtype, copy=copy)
return IntegerArray(values, mask)
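# Illustrative sketch (not part of the original module): ``integer_array`` is
# the low-level constructor reached from ``pd.array(..., dtype="Int64")``;
# missing values become ``pd.NA`` in the resulting IntegerArray. For example:
#
#   >>> integer_array([1, None, 3], dtype="Int64")
#   <IntegerArray>
#   [1, <NA>, 3]
#   Length: 3, dtype: Int64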
def safe_cast(values, dtype, copy: bool):
"""
Safely cast the values to the dtype if they
are equivalent, meaning floats must be equivalent to the
ints.
"""
try:
return values.astype(dtype, casting="safe", copy=copy)
except TypeError as err:
casted = values.astype(dtype, copy=copy)
if (casted == values).all():
return casted
raise TypeError(
f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
) from err
def coerce_to_array(
values, dtype, mask=None, copy: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask
Parameters
----------
values : 1D list-like
dtype : integer dtype
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
    # if values is an integer numpy array, preserve its dtype
if dtype is None and hasattr(values, "dtype"):
if is_integer_dtype(values.dtype):
dtype = values.dtype
if dtype is not None:
if isinstance(dtype, str) and (
dtype.startswith("Int") or dtype.startswith("UInt")
):
# Avoid DeprecationWarning from NumPy about np.dtype("Int64")
# https://github.com/numpy/numpy/pull/7476
dtype = dtype.lower()
if not issubclass(type(dtype), _IntegerDtype):
try:
dtype = INT_STR_TO_DTYPE[str(np.dtype(dtype))]
except KeyError as err:
raise ValueError(f"invalid dtype specified {dtype}") from err
if isinstance(values, IntegerArray):
values, mask = values._data, values._mask
if dtype is not None:
values = values.astype(dtype.numpy_dtype, copy=False)
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
values = np.array(values, copy=copy)
if is_object_dtype(values):
inferred_type = lib.infer_dtype(values, skipna=True)
if inferred_type == "empty":
values = np.empty(len(values))
values.fill(np.nan)
elif inferred_type not in [
"floating",
"integer",
"mixed-integer",
"integer-na",
"mixed-integer-float",
]:
raise TypeError(f"{values.dtype} cannot be converted to an IntegerDtype")
elif is_bool_dtype(values) and is_integer_dtype(dtype):
values = np.array(values, dtype=int, copy=copy)
elif not (is_integer_dtype(values) or is_float_dtype(values)):
raise TypeError(f"{values.dtype} cannot be converted to an IntegerDtype")
if mask is None:
mask = isna(values)
else:
assert len(mask) == len(values)
if not values.ndim == 1:
raise TypeError("values must be a 1D list-like")
if not mask.ndim == 1:
raise TypeError("mask must be a 1D list-like")
# infer dtype if needed
if dtype is None:
dtype = np.dtype("int64")
else:
dtype = dtype.type
    # if the values are float, make sure that we can safely cast them;
    # we copy first because we need to coerce the masked slots below
if mask.any():
values = values.copy()
values[mask] = 1
values = safe_cast(values, dtype, copy=False)
else:
values = safe_cast(values, dtype, copy=False)
return values, mask
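# Illustrative sketch (not part of the original module): for input containing
# a missing value, ``coerce_to_array`` returns the parallel (values, mask)
# arrays that back an IntegerArray; masked slots hold a placeholder value:
#
#   >>> coerce_to_array([1, None, 3], dtype="Int64")
#   (array([1, 1, 3]), array([False,  True, False]))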
class IntegerArray(BaseMaskedArray):
"""
Array of integer (optional missing) values.
.. versionadded:: 0.24.0
.. versionchanged:: 1.0.0
Now uses :attr:`pandas.NA` as the missing value rather
than :attr:`numpy.nan`.
.. warning::
IntegerArray is currently experimental, and its API or internal
implementation may change without warning.
We represent an IntegerArray with 2 numpy arrays:
- data: contains a numpy integer array of the appropriate dtype
- mask: a boolean array holding a mask on the data, True is missing
To construct an IntegerArray from generic array-like input, use
:func:`pandas.array` with one of the integer dtypes (see examples).
See :ref:`integer_na` for more.
Parameters
----------
values : numpy.ndarray
A 1-d integer-dtype array.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values.
copy : bool, default False
Whether to copy the `values` and `mask`.
Attributes
----------
None
Methods
-------
None
Returns
-------
IntegerArray
Examples
--------
Create an IntegerArray with :func:`pandas.array`.
>>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
>>> int_array
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: Int32
String aliases for the dtypes are also available. They are capitalized.
>>> pd.array([1, None, 3], dtype='Int32')
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: Int32
>>> pd.array([1, None, 3], dtype='UInt16')
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: UInt16
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = 1
@cache_readonly
def dtype(self) -> _IntegerDtype:
return INT_STR_TO_DTYPE[str(self._data.dtype)]
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype.kind in ["i", "u"]):
raise TypeError(
"values should be integer numpy array. Use "
"the 'pd.array' function instead"
)
super().__init__(values, mask, copy=copy)
def __neg__(self):
return type(self)(-self._data, self._mask)
def __pos__(self):
return self
def __abs__(self):
return type(self)(np.abs(self._data), self._mask)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "IntegerArray":
return integer_array(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_of_strings(
cls, strings, dtype=None, copy: bool = False
) -> "IntegerArray":
scalars = to_numeric(strings, errors="raise")
return cls._from_sequence(scalars, dtype, copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs):
# For IntegerArray inputs, we apply the ufunc to ._data
# and mask the result.
if method == "reduce":
# Not clear how to handle missing values in reductions. Raise.
raise NotImplementedError("The 'reduce' method is not supported.")
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (IntegerArray,)):
return NotImplemented
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
mask = np.zeros(len(self), dtype=bool)
inputs2 = []
for x in inputs:
if isinstance(x, IntegerArray):
mask |= x._mask
inputs2.append(x._data)
else:
inputs2.append(x)
def reconstruct(x):
# we don't worry about scalar `x` here, since we
# raise for reduce up above.
if is_integer_dtype(x.dtype):
m = mask.copy()
return IntegerArray(x, m)
else:
x[mask] = np.nan
return x
result = getattr(ufunc, method)(*inputs2, **kwargs)
if isinstance(result, tuple):
            return tuple(reconstruct(x) for x in result)
else:
return reconstruct(result)
def _coerce_to_array(self, value) -> Tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value, dtype=self.dtype)
def astype(self, dtype, copy: bool = True) -> ArrayLike:
"""
Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
ndarray or ExtensionArray
NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
if incompatible type with an IntegerDtype, equivalent of same_kind
casting
"""
from pandas.core.arrays.masked import BaseMaskedDtype
from pandas.core.arrays.string_ import StringDtype
dtype = pandas_dtype(dtype)
# if the dtype is exactly the same, we can fastpath
if self.dtype == dtype:
# return the same object for copy=False
return self.copy() if copy else self
# if we are astyping to another nullable masked dtype, we can fastpath
if isinstance(dtype, BaseMaskedDtype):
data = self._data.astype(dtype.numpy_dtype, copy=copy)
# mask is copied depending on whether the data was copied, and
# not directly depending on the `copy` keyword
mask = self._mask if data is self._data else self._mask.copy()
return dtype.construct_array_type()(data, mask, copy=False)
elif isinstance(dtype, StringDtype):
return dtype.construct_array_type()._from_sequence(self, copy=False)
# coerce
if is_float_dtype(dtype):
# In astype, we consider dtype=float to also mean na_value=np.nan
na_value = np.nan
elif is_datetime64_dtype(dtype):
na_value = np.datetime64("NaT")
else:
na_value = lib.no_default
return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)
def _values_for_argsort(self) -> np.ndarray:
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort
"""
data = self._data.copy()
if self._mask.any():
data[self._mask] = data.min() - 1
return data
@classmethod
def _create_comparison_method(cls, op):
op_name = op.__name__
@unpack_zerodim_and_defer(op.__name__)
def cmp_method(self, other):
from pandas.core.arrays import BaseMaskedArray, BooleanArray
mask = None
if isinstance(other, BaseMaskedArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError(
"can only perform ops with 1-d structures"
)
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
if other is libmissing.NA:
# numpy does not handle pd.NA well as "other" scalar (it returns
# a scalar False instead of an array)
# This may be fixed by NA.__array_ufunc__. Revisit this check
# once that's implemented.
result = np.zeros(self._data.shape, dtype="bool")
mask = np.ones(self._data.shape, dtype="bool")
else:
with warnings.catch_warnings():
# numpy may show a FutureWarning:
# elementwise comparison failed; returning scalar instead,
# but in the future will perform elementwise comparison
# before returning NotImplemented. We fall back to the correct
# behavior today, so that should be fine to ignore.
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all="ignore"):
method = getattr(self._data, f"__{op_name}__")
result = method(other)
if result is NotImplemented:
result = invalid_comparison(self._data, other, op)
# nans propagate
if mask is None:
mask = self._mask.copy()
else:
mask = self._mask | mask
return BooleanArray(result, mask)
name = f"__{op.__name__}__"
return set_function_name(cmp_method, name, cls)
def sum(self, skipna=True, min_count=0, **kwargs):
nv.validate_sum((), kwargs)
result = masked_reductions.sum(
values=self._data, mask=self._mask, skipna=skipna, min_count=min_count
)
return result
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
----------
result : array-like
mask : array-like bool
other : scalar or array-like
op_name : str
"""
        # if we have a float operand, or our op is a true division,
        # the result is by definition a float
if (is_float_dtype(other) or is_float(other)) or (
op_name in ["rtruediv", "truediv"]
):
result[mask] = np.nan
return result
return type(self)(result, mask, copy=False)
@classmethod
def _create_arithmetic_method(cls, op):
op_name = op.__name__
@unpack_zerodim_and_defer(op.__name__)
def integer_arithmetic_method(self, other):
omask = None
if getattr(other, "ndim", 0) > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if isinstance(other, IntegerArray):
other, omask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError(
"can only perform ops with 1-d structures"
)
if len(self) != len(other):
raise ValueError("Lengths must match")
if not (is_float_dtype(other) or is_integer_dtype(other)):
raise TypeError("can only perform ops with numeric values")
else:
if not (is_float(other) or is_integer(other) or other is libmissing.NA):
raise TypeError("can only perform ops with numeric values")
if omask is None:
mask = self._mask.copy()
if other is libmissing.NA:
mask |= True
else:
mask = self._mask | omask
if op_name == "pow":
# 1 ** x is 1.
mask = np.where((self._data == 1) & ~self._mask, False, mask)
# x ** 0 is 1.
if omask is not None:
mask = np.where((other == 0) & ~omask, False, mask)
elif other is not libmissing.NA:
mask = np.where(other == 0, False, mask)
elif op_name == "rpow":
# 1 ** x is 1.
if omask is not None:
mask = np.where((other == 1) & ~omask, False, mask)
elif other is not libmissing.NA:
mask = np.where(other == 1, False, mask)
# x ** 0 is 1.
mask = np.where((self._data == 0) & ~self._mask, False, mask)
if other is libmissing.NA:
result = np.ones_like(self._data)
else:
with np.errstate(all="ignore"):
result = op(self._data, other)
# divmod returns a tuple
if op_name == "divmod":
div, mod = result
return (
self._maybe_mask_result(div, mask, other, "floordiv"),
self._maybe_mask_result(mod, mask, other, "mod"),
)
return self._maybe_mask_result(result, mask, other, op_name)
name = f"__{op.__name__}__"
return set_function_name(integer_arithmetic_method, name, cls)
IntegerArray._add_arithmetic_ops()
IntegerArray._add_comparison_ops()
_dtype_docstring = """
An ExtensionDtype for {dtype} integer data.
.. versionchanged:: 1.0.0
Now uses :attr:`pandas.NA` as its missing value,
rather than :attr:`numpy.nan`.
Attributes
----------
None
Methods
-------
None
"""
# create the Dtype
@register_extension_dtype
class Int8Dtype(_IntegerDtype):
type = np.int8
name = "Int8"
__doc__ = _dtype_docstring.format(dtype="int8")
@register_extension_dtype
class Int16Dtype(_IntegerDtype):
type = np.int16
name = "Int16"
__doc__ = _dtype_docstring.format(dtype="int16")
@register_extension_dtype
class Int32Dtype(_IntegerDtype):
type = np.int32
name = "Int32"
__doc__ = _dtype_docstring.format(dtype="int32")
@register_extension_dtype
class Int64Dtype(_IntegerDtype):
type = np.int64
name = "Int64"
__doc__ = _dtype_docstring.format(dtype="int64")
@register_extension_dtype
class UInt8Dtype(_IntegerDtype):
type = np.uint8
name = "UInt8"
__doc__ = _dtype_docstring.format(dtype="uint8")
@register_extension_dtype
class UInt16Dtype(_IntegerDtype):
type = np.uint16
name = "UInt16"
__doc__ = _dtype_docstring.format(dtype="uint16")
@register_extension_dtype
class UInt32Dtype(_IntegerDtype):
type = np.uint32
name = "UInt32"
__doc__ = _dtype_docstring.format(dtype="uint32")
@register_extension_dtype
class UInt64Dtype(_IntegerDtype):
type = np.uint64
name = "UInt64"
__doc__ = _dtype_docstring.format(dtype="uint64")
INT_STR_TO_DTYPE: Dict[str, _IntegerDtype] = {
"int8": Int8Dtype(),
"int16": Int16Dtype(),
"int32": Int32Dtype(),
"int64": Int64Dtype(),
"uint8": UInt8Dtype(),
"uint16": UInt16Dtype(),
"uint32": UInt32Dtype(),
"uint64": UInt64Dtype(),
}
|
the-stack_106_16576
|
"""docter server."""
import os
import sys
import mimetypes
from datetime import datetime
from wsgiref import simple_server
import falcon
from jinja2 import Environment, FileSystemLoader
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def command_stdout(msg):
sys.stdout.write(msg)
def runserver(app, HOST='127.0.0.1', PORT=5016):
SETTINGS = 'server.settings'
httpd = simple_server.make_server(HOST, PORT, app)
command_stdout('Performing system checks...\n\n')
now = datetime.now().strftime('%B %d, %Y - %X')
command_stdout(now)
command_stdout((
'\nSystem check identified no issues (0 silenced).\n'
"Owl version 1.0, using settings '{settings}'\n"
'Starting development server at http://{addr}:{port}/\n'
'Quit the server with CONTROL-C.\n'
).format(
port=PORT,
addr=HOST,
settings=SETTINGS
))
try:
httpd.serve_forever()
except KeyboardInterrupt:
sys.exit(1)
def handle_404(req, resp):
resp.status = falcon.HTTP_404
resp.body = '404'
STATIC_ROOT = os.path.join(BASE_DIR, 'docter/static')
STATIC_URL = '/static/'
def handle_static(req, resp):
filepath = req.url.split(STATIC_URL)[-1]
filename = os.path.join(STATIC_ROOT, filepath)
if not os.path.exists(filename):
raise falcon.HTTPNotFound()
with open(os.path.abspath(filename), 'rb') as filehandler:
filedata = filehandler.read()
if not filedata:
return
content_type, _ = mimetypes.guess_type(req.url)
resp.content_type = content_type
resp.body = filedata
class HomePage(object):
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.content_type = 'text/html'
env = Environment(loader=FileSystemLoader(THIS_DIR), trim_blocks=True)
with open('demo.md') as file:
content = file.read()
# content = content.replace('\n', '\\n"\n+\n"')
data = env.get_template('demo.html').render(content=content)
resp.body = data
APP = falcon.API()
APP.add_sink(handle_404)
APP.add_sink(handle_static, prefix='/static')
APP.add_route('/', HomePage())
if __name__ == '__main__':
runserver(APP)
|
the-stack_106_16577
|
import time
from collections import OrderedDict
from ...plugins.Timer import Timer
from ...plugins.MitosPPumpController import MitosPPumpController
from ..Workflow import Workflow
inputs = OrderedDict(
ppumps_setup={},
delay_time=0.,
verbose=False
)
outputs = OrderedDict()
class TarePumps(Workflow):
def __init__(self):
super(TarePumps,self).__init__(inputs,outputs)
def run(self):
# pump controller plugins run on timer ticks
timer = Timer(dt=1.)
timer.start()
pumps = {}
for pump_nm,setup in self.inputs['ppumps_setup'].items():
pumps[pump_nm] = MitosPPumpController(
timer=timer,verbose=self.inputs['verbose'],**setup)
pumps[pump_nm].start()
self.message_callback('taring {}'.format(pump_nm))
pumps[pump_nm].tare()
# delay between tares to make it easier for a human to monitor
time.sleep(self.inputs['delay_time'])
# make sure all pumps are done taring
self.message_callback('waiting 10 seconds to allow tares to finish...')
time.sleep(10)
# stopping the timer should signal all pump controllers to stop
timer.stop()
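# Hedged usage sketch (kept as a comment because of the package-relative imports
# above, and assuming the Workflow base class exposes `inputs` as a plain mutable
# mapping; the pump setup keys are illustrative -- the real ones are whatever
# MitosPPumpController accepts as keyword arguments):
#
#   wf = TarePumps()
#   wf.inputs['ppumps_setup'] = {'pump_A': {'serial_device': '/dev/ttyUSB0'}}  # hypothetical kwargs
#   wf.inputs['delay_time'] = 2.
#   wf.run()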
|
the-stack_106_16578
|
from conans.client.output import ScopedOutput
from conans.client.source import complete_recipe_sources
from conans.model.ref import ConanFileReference, PackageReference
from conans.errors import NotFoundException, RecipeNotFoundException
from multiprocessing.pool import ThreadPool
def download(app, ref, package_ids, remote, recipe, recorder, remotes):
out, remote_manager, cache, loader = app.out, app.remote_manager, app.cache, app.loader
hook_manager = app.hook_manager
assert(isinstance(ref, ConanFileReference))
output = ScopedOutput(str(ref), out)
hook_manager.execute("pre_download", reference=ref, remote=remote)
try:
ref = remote_manager.get_recipe(ref, remote)
except NotFoundException:
raise RecipeNotFoundException(ref)
conan_file_path = cache.package_layout(ref).conanfile()
conanfile = loader.load_basic(conan_file_path)
# Download the sources too, don't be lazy
complete_recipe_sources(remote_manager, cache, conanfile, ref, remotes)
if not recipe: # Not only the recipe
if not package_ids: # User didn't specify a specific package binary
output.info("Getting the complete package list from '%s'..." % ref.full_str())
packages_props = remote_manager.search_packages(remote, ref, None)
package_ids = list(packages_props.keys())
if not package_ids:
output.warn("No remote binary packages found in remote")
parallel = cache.config.parallel_download
_download_binaries(conanfile, ref, package_ids, cache, remote_manager,
remote, output, recorder, parallel)
hook_manager.execute("post_download", conanfile_path=conan_file_path, reference=ref,
remote=remote)
def _download_binaries(conanfile, ref, package_ids, cache, remote_manager, remote, output,
recorder, parallel):
short_paths = conanfile.short_paths
def _download(package_id):
pref = PackageReference(ref, package_id)
layout = cache.package_layout(pref.ref, short_paths=short_paths)
if output and not output.is_terminal:
output.info("Downloading %s" % str(pref))
remote_manager.get_package(conanfile, pref, layout, remote, output, recorder)
if parallel is not None:
output.info("Downloading binary packages in %s parallel threads" % parallel)
thread_pool = ThreadPool(parallel)
thread_pool.map(_download, [package_id for package_id in package_ids])
thread_pool.close()
thread_pool.join()
else:
for package_id in package_ids:
_download(package_id)
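# Usage note: `parallel` is taken from cache.config.parallel_download in download()
# above; when it is unset (None) the binaries are fetched sequentially, otherwise a
# ThreadPool of that size downloads the package binaries concurrently.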
|
the-stack_106_16580
|
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import itertools
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import exceptions
from sqlalchemy import and_
from neutron._i18n import _
from neutron.common import utils
from neutron.db import _utils as db_utils
from neutron.db import rbac_db_mixin
from neutron.db import rbac_db_models as models
from neutron.extensions import rbac as ext_rbac
from neutron.objects import base
from neutron.objects.db import api as obj_db_api
class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
base.NeutronDbObject,
metaclass=abc.ABCMeta):
rbac_db_cls = None
@classmethod
@abc.abstractmethod
def get_bound_project_ids(cls, context, obj_id):
"""Returns ids of all projects depending on this db object.
Has to be implemented by classes using RbacNeutronMetaclass.
The projects are the ones that need the sharing or 'visibility' of the
object to them. E.g: for QosPolicy that would be the projects using the
Networks and Ports with the shared QosPolicy applied to them.
:returns: set -- a set of projects' ids dependent on this object.
"""
@staticmethod
def is_network_shared(context, rbac_entries):
# NOTE(korzen) this method is copied from db_base_plugin_common.
# The shared attribute for a network now reflects if the network
# is shared to the calling project via an RBAC entry.
matches = ('*',) + ((context.project_id,) if context else ())
for entry in rbac_entries:
if (entry.action == models.ACCESS_SHARED and
entry.target_tenant in matches):
return True
return False
@staticmethod
def get_shared_with_project(context, rbac_db_cls, obj_id, project_id):
# NOTE(korzen) This method enables to query within already started
# session
rbac_db_model = rbac_db_cls.db_model
return (db_utils.model_query(context, rbac_db_model).filter(
and_(rbac_db_model.object_id == obj_id,
rbac_db_model.action == models.ACCESS_SHARED,
rbac_db_model.target_tenant.in_(
['*', project_id]))).count() != 0)
@classmethod
def is_shared_with_project(cls, context, obj_id, project_id):
ctx = context.elevated()
with cls.db_context_reader(ctx):
return cls.get_shared_with_project(ctx, cls.rbac_db_cls,
obj_id, project_id)
@classmethod
def is_accessible(cls, context, db_obj):
return (super(
RbacNeutronDbObjectMixin, cls).is_accessible(context, db_obj) or
cls.is_shared_with_project(context, db_obj.id,
context.project_id))
@classmethod
def _get_db_obj_rbac_entries(cls, context, rbac_obj_id, rbac_action):
rbac_db_model = cls.rbac_db_cls.db_model
return db_utils.model_query(context, rbac_db_model).filter(
and_(rbac_db_model.object_id == rbac_obj_id,
rbac_db_model.action == rbac_action))
@classmethod
def _get_projects_with_shared_access_to_db_obj(cls, context, obj_id):
rbac_db_model = cls.rbac_db_cls.db_model
return set(itertools.chain.from_iterable(context.session.query(
rbac_db_model.target_tenant).filter(
and_(rbac_db_model.object_id == obj_id,
rbac_db_model.action == models.ACCESS_SHARED,
rbac_db_model.target_tenant != '*'))))
@classmethod
def _validate_rbac_policy_delete(cls, context, obj_id, target_tenant):
ctx_admin = context.elevated()
rb_model = cls.rbac_db_cls.db_model
bound_project_ids = cls.get_bound_project_ids(ctx_admin, obj_id)
db_obj_sharing_entries = cls._get_db_obj_rbac_entries(
ctx_admin, obj_id, models.ACCESS_SHARED)
def raise_policy_in_use():
raise ext_rbac.RbacPolicyInUse(
object_id=obj_id,
details='project_id={}'.format(target_tenant))
if target_tenant != '*':
# if there is a wildcard rule, we can return early because it
# shares the object globally
wildcard_sharing_entries = db_obj_sharing_entries.filter(
rb_model.target_tenant == '*')
if wildcard_sharing_entries.count():
return
if target_tenant in bound_project_ids:
raise_policy_in_use()
return
# for the wildcard we need to query all of the rbac entries to
# see if any allow the object sharing
other_target_tenants = cls._get_projects_with_shared_access_to_db_obj(
ctx_admin, obj_id)
if not bound_project_ids.issubset(other_target_tenants):
raise_policy_in_use()
@classmethod
def validate_rbac_policy_delete(cls, resource, event, trigger,
payload=None):
"""Callback to handle RBAC_POLICY, BEFORE_DELETE callback.
:raises: RbacPolicyInUse -- in case the policy is in use.
"""
context = payload.context
policy = payload.latest_state
if policy['action'] != models.ACCESS_SHARED:
return
target_tenant = policy['target_tenant']
db_obj = obj_db_api.get_object(
cls, context.elevated(), id=policy['object_id'])
if db_obj.project_id == target_tenant:
return
cls._validate_rbac_policy_delete(context=context,
obj_id=policy['object_id'],
target_tenant=target_tenant)
@classmethod
def validate_rbac_policy_create(cls, resource, event, trigger,
payload=None):
"""Callback to handle RBAC_POLICY, BEFORE_CREATE callback.
"""
pass
@classmethod
def validate_rbac_policy_update(cls, resource, event, trigger,
payload=None):
"""Callback to handle RBAC_POLICY, BEFORE_UPDATE callback.
:raises: RbacPolicyInUse -- in case the update is forbidden.
"""
policy = payload.latest_state
prev_project = policy['target_tenant']
new_project = payload.request_body['target_tenant']
if prev_project == new_project:
return
if new_project != '*':
return cls.validate_rbac_policy_delete(
resource, event, trigger, payload=payload)
@classmethod
def validate_rbac_policy_change(cls, resource, event, trigger,
payload=None):
"""Callback to validate changes.
This is the dispatching function for create, update and delete
callbacks. On creation and update, verify that the creator is an admin
or owns the resource being shared.
"""
object_type = payload.metadata.get('object_type')
context = payload.context
policy = (payload.request_body if event == events.BEFORE_CREATE
else payload.latest_state)
# TODO(hdaniel): As this code was shamelessly stolen from
# NeutronDbPluginV2.validate_network_rbac_policy_change(), those pieces
# should be synced and contain the same bugs, until Network RBAC logic
# (hopefully) melded with this one.
if object_type != cls.rbac_db_cls.db_model.object_type:
return
db_obj = obj_db_api.get_object(
cls, context.elevated(), id=policy['object_id'])
if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE):
if (not context.is_admin and
db_obj['project_id'] != context.project_id):
msg = _("Only admins can manipulate policies on objects "
"they do not own")
raise exceptions.InvalidInput(error_message=msg)
callback_map = {events.BEFORE_CREATE: cls.validate_rbac_policy_create,
events.BEFORE_UPDATE: cls.validate_rbac_policy_update,
events.BEFORE_DELETE: cls.validate_rbac_policy_delete}
if event in callback_map:
return callback_map[event](resource, event, trigger,
payload=payload)
def attach_rbac(self, obj_id, project_id, target_tenant='*'):
obj_type = self.rbac_db_cls.db_model.object_type
rbac_policy = {'rbac_policy': {'object_id': obj_id,
'target_tenant': target_tenant,
'project_id': project_id,
'object_type': obj_type,
'action': models.ACCESS_SHARED}}
return self.create_rbac_policy(self.obj_context, rbac_policy)
def update_shared(self, is_shared_new, obj_id):
admin_context = self.obj_context.elevated()
shared_prev = obj_db_api.get_object(self.rbac_db_cls, admin_context,
object_id=obj_id,
target_tenant='*',
action=models.ACCESS_SHARED)
is_shared_prev = bool(shared_prev)
if is_shared_prev == is_shared_new:
return
# 'shared' goes False -> True
if not is_shared_prev and is_shared_new:
self.attach_rbac(obj_id, self.obj_context.project_id)
return
# 'shared' goes True -> False is actually an attempt to delete
# rbac rule for sharing obj_id with target_tenant = '*'
self._validate_rbac_policy_delete(self.obj_context, obj_id, '*')
return self.obj_context.session.delete(shared_prev)
def from_db_object(self, db_obj):
self._load_shared(db_obj)
super(RbacNeutronDbObjectMixin, self).from_db_object(db_obj)
def obj_load_attr(self, attrname):
if attrname == 'shared':
return self._load_shared()
super(RbacNeutronDbObjectMixin, self).obj_load_attr(attrname)
def _load_shared(self, db_obj=None):
# Do not override 'shared' attribute on create() or update()
if 'shared' in self.obj_get_changes():
return
if db_obj:
# NOTE(korzen) db_obj is passed when object is loaded from DB
rbac_entries = db_obj.get('rbac_entries') or {}
shared = self.is_network_shared(self.obj_context, rbac_entries)
else:
# NOTE(korzen) this case is used when object was
# instantiated and without DB interaction (get_object(s), update,
# create), it should be rare case to load 'shared' by that method
shared = self.get_shared_with_project(
self.obj_context.elevated(),
self.rbac_db_cls,
self.id,
self.project_id
)
setattr(self, 'shared', shared)
self.obj_reset_changes(['shared'])
def _update_post(self, obj_changes):
if "shared" in obj_changes:
self.update_shared(self.shared, self.id)
def _update_hook(self, update_orig):
with self.db_context_writer(self.obj_context):
# NOTE(slaweq): copy of object changes is required to pass it later to
# _update_post method because update() will reset all those changes
obj_changes = self.obj_get_changes()
update_orig(self)
_update_post(self, obj_changes)
self._load_shared(db_obj=self.db_obj)
def _create_post(self):
if self.shared:
self.attach_rbac(self.id, self.project_id)
def _create_hook(self, orig_create):
with self.db_context_writer(self.obj_context):
orig_create(self)
_create_post(self)
self._load_shared(db_obj=self.db_obj)
def _to_dict_hook(self, to_dict_orig):
dct = to_dict_orig(self)
if self.obj_context:
dct['shared'] = self.is_shared_with_project(
self.obj_context, self.id, self.obj_context.project_id)
else:
# most OVO objects on an agent will not have a context set on the
# object because they will be generated from obj_from_primitive.
dct['shared'] = False
return dct
class RbacNeutronMetaclass(type):
"""Adds support for RBAC in NeutronDbObjects.
Injects code for CRUD operations and modifies existing ops to do so.
"""
@classmethod
def _get_attribute(cls, attribute_name, bases):
for b in bases:
attribute = getattr(b, attribute_name, None)
if attribute:
return attribute
@classmethod
def get_attribute(cls, attribute_name, bases, dct):
return (dct.get(attribute_name, None) or
cls._get_attribute(attribute_name, bases))
@classmethod
def update_synthetic_fields(cls, bases, dct):
if not dct.get('synthetic_fields', None):
synthetic_attr = cls.get_attribute('synthetic_fields', bases, dct)
dct['synthetic_fields'] = synthetic_attr or []
if 'shared' in dct['synthetic_fields']:
raise exceptions.ObjectActionError(
action=_('shared attribute switching to synthetic'),
reason=_('already a synthetic attribute'))
dct['synthetic_fields'].append('shared')
@staticmethod
def subscribe_to_rbac_events(class_instance):
for e in (events.BEFORE_CREATE, events.BEFORE_UPDATE,
events.BEFORE_DELETE):
registry.subscribe(class_instance.validate_rbac_policy_change,
resources.RBAC_POLICY, e)
@staticmethod
def validate_existing_attrs(cls_name, dct):
if 'shared' not in dct['fields']:
raise KeyError(_('No shared key in %s fields') % cls_name)
if 'rbac_db_cls' not in dct:
raise AttributeError(_('rbac_db_cls not found in %s') % cls_name)
@staticmethod
def get_replaced_method(orig_method, new_method):
def func(self):
return new_method(self, orig_method)
return func
@classmethod
def replace_class_methods_with_hooks(cls, bases, dct):
methods_replacement_map = {'create': _create_hook,
'update': _update_hook,
'to_dict': _to_dict_hook}
for orig_method_name, new_method in methods_replacement_map.items():
orig_method = cls.get_attribute(orig_method_name, bases, dct)
hook_method = cls.get_replaced_method(orig_method,
new_method)
dct[orig_method_name] = hook_method
def __new__(cls, name, bases, dct):
cls.validate_existing_attrs(name, dct)
cls.update_synthetic_fields(bases, dct)
cls.replace_class_methods_with_hooks(bases, dct)
klass = type(name, (RbacNeutronDbObjectMixin,) + bases, dct)
klass.add_extra_filter_name('shared')
cls.subscribe_to_rbac_events(klass)
return klass
NeutronRbacObject = utils.with_metaclass(RbacNeutronMetaclass,
base.NeutronDbObject)
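# Hedged usage note: a concrete object built with NeutronRbacObject must provide
#   * a 'shared' entry in its `fields` definition,
#   * an `rbac_db_cls` attribute pointing at its RBAC OVO class, and
#   * an implementation of get_bound_project_ids().
# The first two are enforced by validate_existing_attrs() at class-creation time;
# the third is declared abstract on RbacNeutronDbObjectMixin.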
|
the-stack_106_16586
|
import unittest
import json
import sys
from splunk.appserver.mrsparkle.lib.util import make_splunkhome_path
sys.path.insert(0, make_splunkhome_path(["etc", "apps", "amp4e_events_input"]))
from splunklib.client import Service, KVStoreCollection
from bin.amp4e_events_input.amp_storage_wrapper import AmpStorageWrapper
from test.support.mock_definitions import MockDefinitions
from test.support.config import SPLUNK_AUTH_OPTIONS
class TestWrapperWithoutConnection(unittest.TestCase):
def setUp(self):
self.metadata = MockDefinitions().metadata
self.storage_wrapper = AmpStorageWrapper(self.metadata)
def test_instantiation(self):
self.assertEqual(self.storage_wrapper.input_name, self.metadata['name'])
self.assertIsNone(self.storage_wrapper._store)
self.assertIsInstance(self.storage_wrapper.service, Service)
def test_name_query_property(self):
self.assertEqual(self.storage_wrapper._AmpStorageWrapper__name_query,
{'input_name': self.metadata['name']})
class TestWrapperWithConnection(unittest.TestCase):
COLLECTION_NAME = 'TestAmpEventStreams'
def setUp(self):
AmpStorageWrapper.COLLECTION_NAME = self.COLLECTION_NAME
self.service = Service(owner='nobody', app='amp4e_events_input', scheme=SPLUNK_AUTH_OPTIONS['scheme'],
host=SPLUNK_AUTH_OPTIONS['host'], port=SPLUNK_AUTH_OPTIONS['port'],
username=SPLUNK_AUTH_OPTIONS['username'], password=SPLUNK_AUTH_OPTIONS['password'])
self.service.login()
self.service.kvstore.create(self.COLLECTION_NAME)
self.metadata = MockDefinitions(self.service.token).metadata
self.stream_representation = {'input_name': self.metadata['name']}
self.storage = AmpStorageWrapper(self.metadata)
def tearDown(self):
self.service.kvstore.delete(self.COLLECTION_NAME)
def test_collection(self):
self.assertIsInstance(self.storage.collection, KVStoreCollection)
self.assertIn(self.COLLECTION_NAME, [x.name for x in self.service.kvstore])
def test_find_stream(self):
self.__create_stream()
self.assertEqual(self.storage.find_stream()['input_name'], self.stream_representation['input_name'])
# returns None if stream cannot be found
def test_find_stream_none(self):
self.assertIsNone(self.storage.find_stream())
def test_find_stream_other_query(self):
new_data = {'custom_field': '1234'}
new_representation = self.stream_representation.copy()
new_representation.update(new_data)
self.__create_stream(new_representation)
found_stream = self.storage.find_stream(new_data)
self.assertEqual(found_stream['input_name'], self.stream_representation['input_name'])
self.assertEqual(found_stream['custom_field'], new_data['custom_field'])
def test_delete_stream(self):
self.__create_stream()
self.storage.delete_stream()
self.assertEqual(self.service.kvstore[self.COLLECTION_NAME].data
.query(query=json.dumps(self.stream_representation)), [])
# does not raise error if stream does not exist
def test_delete_stream_none(self):
self.storage.delete_stream()
def test_save_stream_with_data_create(self):
additional_data = {'test_key': 'test_value'}
self.assertEqual(self.service.kvstore[self.COLLECTION_NAME]
.data.query(query=json.dumps(self.stream_representation)), [])
self.storage.save_stream_with_data(additional_data)
streams = self.service.kvstore[self.COLLECTION_NAME].data.query(query=json.dumps(self.stream_representation))
self.assertEqual(len(streams), 1)
stream = streams[0]
self.assertEqual(stream['input_name'], self.stream_representation['input_name'])
self.assertEqual(stream['test_key'], additional_data['test_key'])
def test_save_stream_with_data_update(self):
self.__create_stream()
self.assertEqual(len(self.service.kvstore[self.COLLECTION_NAME]
.data.query(query=json.dumps(self.stream_representation))), 1)
additional_data = {'test_key': 'test_value'}
self.storage.save_stream_with_data(additional_data)
streams = self.service.kvstore[self.COLLECTION_NAME].data.query(query=json.dumps(self.stream_representation))
self.assertEqual(len(streams), 1)
stream = streams[0]
self.assertEqual(stream['input_name'], self.stream_representation['input_name'])
self.assertEqual(stream['test_key'], additional_data['test_key'])
def __create_stream(self, query=None):
query = query if query is not None else self.stream_representation
self.service.kvstore[self.COLLECTION_NAME].data.insert(json.dumps(query))
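# Note: these are integration tests -- TestWrapperWithConnection talks to a live
# Splunk instance using the credentials in test.support.config.SPLUNK_AUTH_OPTIONS
# and creates/deletes a temporary KV store collection around every test.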
|
the-stack_106_16587
|
#!/usr/bin/env python
#
# 'idf.py' is a top-level config/build command line tool for ESP-IDF
#
# You don't have to use idf.py, you can use cmake directly
# (or use cmake in an IDE)
#
#
#
# Copyright 2019 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WARNING: we don't check for Python build-time dependencies until
# check_environment() function below. If possible, avoid importing
# any external libraries here - put in external script, or import in
# their specific function instead.
from __future__ import print_function
import codecs
import json
import locale
import os
import os.path
import subprocess
import sys
from collections import Counter, OrderedDict
from importlib import import_module
from pkgutil import iter_modules
# pyc files remain in the filesystem when switching between branches which might raise errors for incompatible
# idf.py extensions. Therefore, pyc file generation is turned off:
sys.dont_write_bytecode = True
from idf_py_actions.errors import FatalError # noqa: E402
from idf_py_actions.tools import (executable_exists, idf_version, merge_action_lists, realpath) # noqa: E402
# Use this Python interpreter for any subprocesses we launch
PYTHON = sys.executable
# note: os.environ changes don't automatically propagate to child processes,
# you have to pass env=os.environ explicitly anywhere that we create a process
os.environ["PYTHON"] = sys.executable
# Name of the program, normally 'idf.py'.
# Can be overridden from idf.bat using IDF_PY_PROGRAM_NAME
PROG = os.getenv("IDF_PY_PROGRAM_NAME", "idf.py")
# function prints warning when autocompletion is not being performed
# set argument stream to sys.stderr for errors and exceptions
def print_warning(message, stream=None):
stream = stream or sys.stderr
if not os.getenv('_IDF.PY_COMPLETE'):
print(message, file=stream)
def check_environment():
"""
Verify the environment contains the top-level tools we need to operate
(cmake will check a lot of other things)
"""
checks_output = []
if not executable_exists(["cmake", "--version"]):
debug_print_idf_version()
raise FatalError("'cmake' must be available on the PATH to use %s" % PROG)
# verify that IDF_PATH env variable is set
# find the directory idf.py is in, then the parent directory of this, and assume this is IDF_PATH
detected_idf_path = realpath(os.path.join(os.path.dirname(__file__), ".."))
if "IDF_PATH" in os.environ:
set_idf_path = realpath(os.environ["IDF_PATH"])
if set_idf_path != detected_idf_path:
print_warning(
"WARNING: IDF_PATH environment variable is set to %s but %s path indicates IDF directory %s. "
"Using the environment variable directory, but results may be unexpected..." %
(set_idf_path, PROG, detected_idf_path))
else:
print_warning("Setting IDF_PATH environment variable: %s" % detected_idf_path)
os.environ["IDF_PATH"] = detected_idf_path
# check Python version
if sys.version_info[0] < 3:
print_warning("WARNING: Support for Python 2 is deprecated and will be removed in future versions.")
elif sys.version_info[0] == 3 and sys.version_info[1] < 6:
print_warning("WARNING: Python 3 versions older than 3.6 are not supported.")
# check Python dependencies
checks_output.append("Checking Python dependencies...")
try:
out = subprocess.check_output(
[
os.environ["PYTHON"],
os.path.join(os.environ["IDF_PATH"], "tools", "check_python_dependencies.py"),
],
env=os.environ,
)
checks_output.append(out.decode('utf-8', 'ignore').strip())
except subprocess.CalledProcessError as e:
print_warning(e.output.decode('utf-8', 'ignore'), stream=sys.stderr)
debug_print_idf_version()
raise SystemExit(1)
return checks_output
def _safe_relpath(path, start=None):
""" Return a relative path, same as os.path.relpath, but only if this is possible.
It is not possible on Windows, if the start directory and the path are on different drives.
"""
try:
return os.path.relpath(path, os.curdir if start is None else start)
except ValueError:
return os.path.abspath(path)
def debug_print_idf_version():
version = idf_version()
if version:
print_warning("ESP-IDF %s" % version)
else:
print_warning("ESP-IDF version unknown")
class PropertyDict(dict):
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("'PropertyDict' object has no attribute '%s'" % name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("'PropertyDict' object has no attribute '%s'" % name)
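# Illustration of PropertyDict (kept as a comment so importing this file stays
# side-effect free): keys double as attributes, and missing attributes raise
# AttributeError instead of returning None.
#   args = PropertyDict(port=None, baud=460800)
#   args.baud         # -> 460800
#   args["port"]      # -> None
#   args.no_such_key  # -> AttributeError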
def init_cli(verbose_output=None):
# Click is imported here to run it after check_environment()
import click
class Deprecation(object):
"""Construct deprecation notice for help messages"""
def __init__(self, deprecated=False):
self.deprecated = deprecated
self.since = None
self.removed = None
self.exit_with_error = None
self.custom_message = ""
if isinstance(deprecated, dict):
self.custom_message = deprecated.get("message", "")
self.since = deprecated.get("since", None)
self.removed = deprecated.get("removed", None)
self.exit_with_error = deprecated.get("exit_with_error", None)
elif isinstance(deprecated, str):
self.custom_message = deprecated
def full_message(self, type="Option"):
if self.exit_with_error:
return "%s is deprecated %sand was removed%s.%s" % (
type,
"since %s " % self.since if self.since else "",
" in %s" % self.removed if self.removed else "",
" %s" % self.custom_message if self.custom_message else "",
)
else:
return "%s is deprecated %sand will be removed in%s.%s" % (
type,
"since %s " % self.since if self.since else "",
" %s" % self.removed if self.removed else " future versions",
" %s" % self.custom_message if self.custom_message else "",
)
def help(self, text, type="Option", separator=" "):
text = text or ""
return self.full_message(type) + separator + text if self.deprecated else text
def short_help(self, text):
text = text or ""
return ("Deprecated! " + text) if self.deprecated else text
def check_deprecation(ctx):
"""Prints deprecation warnings for arguments in given context"""
for option in ctx.command.params:
default = () if option.multiple else option.default
if isinstance(option, Option) and option.deprecated and ctx.params[option.name] != default:
deprecation = Deprecation(option.deprecated)
if deprecation.exit_with_error:
raise FatalError("Error: %s" % deprecation.full_message('Option "%s"' % option.name))
else:
print_warning("Warning: %s" % deprecation.full_message('Option "%s"' % option.name))
class Task(object):
def __init__(self, callback, name, aliases, dependencies, order_dependencies, action_args):
self.callback = callback
self.name = name
self.dependencies = dependencies
self.order_dependencies = order_dependencies
self.action_args = action_args
self.aliases = aliases
def __call__(self, context, global_args, action_args=None):
if action_args is None:
action_args = self.action_args
self.callback(self.name, context, global_args, **action_args)
class Action(click.Command):
def __init__(
self,
name=None,
aliases=None,
deprecated=False,
dependencies=None,
order_dependencies=None,
hidden=False,
**kwargs):
super(Action, self).__init__(name, **kwargs)
self.name = self.name or self.callback.__name__
self.deprecated = deprecated
self.hidden = hidden
if aliases is None:
aliases = []
self.aliases = aliases
self.help = self.help or self.callback.__doc__
if self.help is None:
self.help = ""
if dependencies is None:
dependencies = []
if order_dependencies is None:
order_dependencies = []
# Show first line of help if short help is missing
self.short_help = self.short_help or self.help.split("\n")[0]
if deprecated:
deprecation = Deprecation(deprecated)
self.short_help = deprecation.short_help(self.short_help)
self.help = deprecation.help(self.help, type="Command", separator="\n")
# Add aliases to help string
if aliases:
aliases_help = "Aliases: %s." % ", ".join(aliases)
self.help = "\n".join([self.help, aliases_help])
self.short_help = " ".join([aliases_help, self.short_help])
self.unwrapped_callback = self.callback
if self.callback is not None:
def wrapped_callback(**action_args):
return Task(
callback=self.unwrapped_callback,
name=self.name,
dependencies=dependencies,
order_dependencies=order_dependencies,
action_args=action_args,
aliases=self.aliases,
)
self.callback = wrapped_callback
def invoke(self, ctx):
if self.deprecated:
deprecation = Deprecation(self.deprecated)
message = deprecation.full_message('Command "%s"' % self.name)
if deprecation.exit_with_error:
raise FatalError("Error: %s" % message)
else:
print_warning("Warning: %s" % message)
self.deprecated = False # disable Click's built-in deprecation handling
# Print warnings for options
check_deprecation(ctx)
return super(Action, self).invoke(ctx)
class Argument(click.Argument):
"""
Positional argument
names - alias of 'param_decls'
"""
def __init__(self, **kwargs):
names = kwargs.pop("names")
super(Argument, self).__init__(names, **kwargs)
class Scope(object):
"""
Scope for sub-command option.
possible values:
- default - only available on defined level (global/action)
- global - When defined for action, also available as global
- shared - Opposite to 'global': when defined in global scope, also available for all actions
"""
SCOPES = ("default", "global", "shared")
def __init__(self, scope=None):
if scope is None:
self._scope = "default"
elif isinstance(scope, str) and scope in self.SCOPES:
self._scope = scope
elif isinstance(scope, Scope):
self._scope = str(scope)
else:
raise FatalError("Unknown scope for option: %s" % scope)
@property
def is_global(self):
return self._scope == "global"
@property
def is_shared(self):
return self._scope == "shared"
def __str__(self):
return self._scope
class Option(click.Option):
"""Option that knows whether it should be global"""
def __init__(self, scope=None, deprecated=False, hidden=False, **kwargs):
"""
Keyword arguments additional to Click's Option class:
names - alias of 'param_decls'
deprecated - marks option as deprecated. May be boolean, string (with custom deprecation message)
or dict with optional keys:
since: version of deprecation
removed: version when option will be removed
custom_message: Additional text to deprecation warning
"""
kwargs["param_decls"] = kwargs.pop("names")
super(Option, self).__init__(**kwargs)
self.deprecated = deprecated
self.scope = Scope(scope)
self.hidden = hidden
if deprecated:
deprecation = Deprecation(deprecated)
self.help = deprecation.help(self.help)
if self.envvar:
self.help += " The default value can be set with the %s environment variable." % self.envvar
if self.scope.is_global:
self.help += " This option can be used at most once either globally, or for one subcommand."
def get_help_record(self, ctx):
# Backport "hidden" parameter to click 5.0
if self.hidden:
return
return super(Option, self).get_help_record(ctx)
class CLI(click.MultiCommand):
"""Action list contains all actions with options available for CLI"""
def __init__(self, all_actions=None, verbose_output=None, help=None):
super(CLI, self).__init__(
chain=True,
invoke_without_command=True,
result_callback=self.execute_tasks,
context_settings={"max_content_width": 140},
help=help,
)
self._actions = {}
self.global_action_callbacks = []
self.commands_with_aliases = {}
if verbose_output is None:
verbose_output = []
self.verbose_output = verbose_output
if all_actions is None:
all_actions = {}
shared_options = []
# Global options
for option_args in all_actions.get("global_options", []):
option = Option(**option_args)
self.params.append(option)
if option.scope.is_shared:
shared_options.append(option)
# Global options validators
self.global_action_callbacks = all_actions.get("global_action_callbacks", [])
# Actions
for name, action in all_actions.get("actions", {}).items():
arguments = action.pop("arguments", [])
options = action.pop("options", [])
if arguments is None:
arguments = []
if options is None:
options = []
self._actions[name] = Action(name=name, **action)
for alias in [name] + action.get("aliases", []):
self.commands_with_aliases[alias] = name
for argument_args in arguments:
self._actions[name].params.append(Argument(**argument_args))
# Add all shared options
for option in shared_options:
self._actions[name].params.append(option)
for option_args in options:
option = Option(**option_args)
if option.scope.is_shared:
raise FatalError(
'"%s" is defined for action "%s". '
' "shared" options can be declared only on global level' % (option.name, name))
                    # Promote options to global if seen for the first time
if option.scope.is_global and option.name not in [o.name for o in self.params]:
self.params.append(option)
self._actions[name].params.append(option)
def list_commands(self, ctx):
return sorted(filter(lambda name: not self._actions[name].hidden, self._actions))
def get_command(self, ctx, name):
if name in self.commands_with_aliases:
return self._actions.get(self.commands_with_aliases.get(name))
# Trying fallback to build target (from "all" action) if command is not known
else:
return Action(name=name, callback=self._actions.get('fallback').unwrapped_callback)
def _print_closing_message(self, args, actions):
# print a closing message of some kind
#
if "flash" in str(actions) or "dfu" in str(actions):
print("Done")
return
if not os.path.exists(os.path.join(args.build_dir, "flasher_args.json")):
print("Done")
return
# Otherwise, if we built any binaries print a message about
# how to flash them
def print_flashing_message(title, key):
with open(os.path.join(args.build_dir, "flasher_args.json")) as f:
flasher_args = json.load(f)
def flasher_path(f):
return _safe_relpath(os.path.join(args.build_dir, f))
if key != "project": # flashing a single item
if key not in flasher_args:
# This is the case for 'idf.py bootloader' if Secure Boot is on, need to follow manual flashing steps
print("\n%s build complete." % title)
return
cmd = ""
if (key == "bootloader"): # bootloader needs --flash-mode, etc to be passed in
cmd = " ".join(flasher_args["write_flash_args"]) + " "
cmd += flasher_args[key]["offset"] + " "
cmd += flasher_path(flasher_args[key]["file"])
else: # flashing the whole project
cmd = " ".join(flasher_args["write_flash_args"]) + " "
flash_items = sorted(
((o, f) for (o, f) in flasher_args["flash_files"].items() if len(o) > 0),
key=lambda x: int(x[0], 0),
)
for o, f in flash_items:
cmd += o + " " + flasher_path(f) + " "
print("\n%s build complete. To flash, run this command:" % title)
print(
"%s %s -p %s -b %s --before %s --after %s --chip %s %s write_flash %s" % (
PYTHON,
_safe_relpath("%s/components/esptool_py/esptool/esptool.py" % os.environ["IDF_PATH"]),
args.port or "(PORT)",
args.baud,
flasher_args["extra_esptool_args"]["before"],
flasher_args["extra_esptool_args"]["after"],
flasher_args["extra_esptool_args"]["chip"],
"--no-stub" if not flasher_args["extra_esptool_args"]["stub"] else "",
cmd.strip(),
))
print(
"or run 'idf.py -p %s %s'" % (
args.port or "(PORT)",
key + "-flash" if key != "project" else "flash",
))
if "all" in actions or "build" in actions:
print_flashing_message("Project", "project")
else:
if "app" in actions:
print_flashing_message("App", "app")
if "partition_table" in actions:
print_flashing_message("Partition Table", "partition_table")
if "bootloader" in actions:
print_flashing_message("Bootloader", "bootloader")
def execute_tasks(self, tasks, **kwargs):
ctx = click.get_current_context()
global_args = PropertyDict(kwargs)
def _help_and_exit():
print(ctx.get_help())
ctx.exit()
# Show warning if some tasks are present several times in the list
            duplicated_tasks = sorted(
                [item for item, count in Counter(task.name for task in tasks).items() if count > 1])
            if duplicated_tasks:
                dupes = ", ".join('"%s"' % t for t in duplicated_tasks)
                print_warning(
                    "WARNING: Command%s found in the list of commands more than once. " %
                    ("s %s are" % dupes if len(duplicated_tasks) > 1 else " %s is" % dupes) +
                    "Only first occurrence will be executed.")
for task in tasks:
# Show help and exit if help is in the list of commands
if task.name == 'help':
_help_and_exit()
# Set propagated global options.
# These options may be set on one subcommand, but available in the list of global arguments
for key in list(task.action_args):
option = next((o for o in ctx.command.params if o.name == key), None)
if option and (option.scope.is_global or option.scope.is_shared):
local_value = task.action_args.pop(key)
global_value = global_args[key]
default = () if option.multiple else option.default
if global_value != default and local_value != default and global_value != local_value:
raise FatalError(
'Option "%s" provided for "%s" is already defined to a different value. '
"This option can appear at most once in the command line." % (key, task.name))
if local_value != default:
global_args[key] = local_value
# Show warnings about global arguments
check_deprecation(ctx)
# Make sure that define_cache_entry is mutable list and can be modified in callbacks
global_args.define_cache_entry = list(global_args.define_cache_entry)
# Execute all global action callback - first from idf.py itself, then from extensions
for action_callback in ctx.command.global_action_callbacks:
action_callback(ctx, global_args, tasks)
# Always show help when command is not provided
if not tasks:
_help_and_exit()
            # Build the full list of tasks to run and deal with dependencies and order dependencies
tasks_to_run = OrderedDict()
while tasks:
task = tasks[0]
tasks_dict = dict([(t.name, t) for t in tasks])
                dependencies_processed = True
                # If the task has dependencies, they have to be executed before the task itself.
for dep in task.dependencies:
if dep not in tasks_to_run.keys():
                        # If the dependent task is in the list of unprocessed tasks, move it to the front of the list
if dep in tasks_dict.keys():
dep_task = tasks.pop(tasks.index(tasks_dict[dep]))
                        # Otherwise invoke it with the default set of options
                        # and put it at the front of the list of unprocessed tasks
else:
print(
'Adding "%s"\'s dependency "%s" to list of commands with default set of options.' %
(task.name, dep))
dep_task = ctx.invoke(ctx.command.get_command(ctx, dep))
# Remove options with global scope from invoke tasks because they are already in global_args
for key in list(dep_task.action_args):
option = next((o for o in ctx.command.params if o.name == key), None)
if option and (option.scope.is_global or option.scope.is_shared):
dep_task.action_args.pop(key)
tasks.insert(0, dep_task)
                        dependencies_processed = False
                # Order-only dependencies are moved to the front of the queue if they are present in the command list
for dep in task.order_dependencies:
if dep in tasks_dict.keys() and dep not in tasks_to_run.keys():
tasks.insert(0, tasks.pop(tasks.index(tasks_dict[dep])))
                        dependencies_processed = False
                if dependencies_processed:
# Remove task from list of unprocessed tasks
tasks.pop(0)
# And add to the queue
if task.name not in tasks_to_run.keys():
tasks_to_run.update([(task.name, task)])
# Run all tasks in the queue
# when global_args.dry_run is true idf.py works in idle mode and skips actual task execution
if not global_args.dry_run:
for task in tasks_to_run.values():
name_with_aliases = task.name
if task.aliases:
name_with_aliases += " (aliases: %s)" % ", ".join(task.aliases)
print("Executing action: %s" % name_with_aliases)
task(ctx, global_args, task.action_args)
self._print_closing_message(global_args, tasks_to_run.keys())
return tasks_to_run
    # That's a tiny parser that parses project-dir even before constructing the
    # fully featured click parser, to be sure that extensions are loaded from the right place
@click.command(
add_help_option=False,
context_settings={
"allow_extra_args": True,
"ignore_unknown_options": True
},
)
@click.option("-C", "--project-dir", default=os.getcwd(), type=click.Path())
def parse_project_dir(project_dir):
return realpath(project_dir)
    # Set `complete_var` to a non-existing environment variable name to prevent early cmd completion
project_dir = parse_project_dir(standalone_mode=False, complete_var="_IDF.PY_COMPLETE_NOT_EXISTING")
all_actions = {}
# Load extensions from components dir
idf_py_extensions_path = os.path.join(os.environ["IDF_PATH"], "tools", "idf_py_actions")
extension_dirs = [realpath(idf_py_extensions_path)]
extra_paths = os.environ.get("IDF_EXTRA_ACTIONS_PATH")
if extra_paths is not None:
for path in extra_paths.split(';'):
path = realpath(path)
if path not in extension_dirs:
extension_dirs.append(path)
extensions = {}
for directory in extension_dirs:
if directory and not os.path.exists(directory):
print_warning('WARNING: Directory with idf.py extensions doesn\'t exist:\n %s' % directory)
continue
sys.path.append(directory)
for _finder, name, _ispkg in sorted(iter_modules([directory])):
if name.endswith('_ext'):
extensions[name] = import_module(name)
# Load component manager if available and not explicitly disabled
if os.getenv('IDF_COMPONENT_MANAGER', None) != '0':
try:
from idf_component_manager import idf_extensions
extensions['component_manager_ext'] = idf_extensions
os.environ['IDF_COMPONENT_MANAGER'] = '1'
except ImportError:
pass
for name, extension in extensions.items():
try:
all_actions = merge_action_lists(all_actions, extension.action_extensions(all_actions, project_dir))
except AttributeError:
print_warning('WARNING: Cannot load idf.py extension "%s"' % name)
# Load extensions from project dir
if os.path.exists(os.path.join(project_dir, "idf_ext.py")):
sys.path.append(project_dir)
try:
from idf_ext import action_extensions
except ImportError:
print_warning("Error importing extension file idf_ext.py. Skipping.")
print_warning("Please make sure that it contains implementation (even if it's empty) of add_action_extensions")
try:
all_actions = merge_action_lists(all_actions, action_extensions(all_actions, project_dir))
except NameError:
pass
cli_help = (
"ESP-IDF CLI build management tool. "
"For commands that are not known to idf.py an attempt to execute it as a build system target will be made.")
return CLI(help=cli_help, verbose_output=verbose_output, all_actions=all_actions)
def main():
checks_output = check_environment()
cli = init_cli(verbose_output=checks_output)
# the argument `prog_name` must contain name of the file - not the absolute path to it!
cli(sys.argv[1:], prog_name=PROG, complete_var="_IDF.PY_COMPLETE")
def _valid_unicode_config():
# Python 2 is always good
if sys.version_info[0] == 2:
return True
# With python 3 unicode environment is required
try:
return codecs.lookup(locale.getpreferredencoding()).name != "ascii"
except Exception:
return False
def _find_usable_locale():
try:
locales = subprocess.Popen(["locale", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
except OSError:
locales = ""
if isinstance(locales, bytes):
locales = locales.decode("ascii", "replace")
usable_locales = []
for line in locales.splitlines():
locale = line.strip()
locale_name = locale.lower().replace("-", "")
# C.UTF-8 is the best option, if supported
if locale_name == "c.utf8":
return locale
if locale_name.endswith(".utf8"):
# Make a preference of english locales
if locale.startswith("en_"):
usable_locales.insert(0, locale)
else:
usable_locales.append(locale)
if not usable_locales:
raise FatalError(
"Support for Unicode filenames is required, but no suitable UTF-8 locale was found on your system."
" Please refer to the manual for your operating system for details on locale reconfiguration.")
return usable_locales[0]
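# Illustrative note: given `locale -a` output containing 'C.utf8', it is returned
# immediately; otherwise English UTF-8 locales (en_*) are preferred over the other
# UTF-8 locales found on the system.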
if __name__ == "__main__":
try:
# On MSYS2 we need to run idf.py with "winpty" in order to be able to cancel the subprocesses properly on
# keyboard interrupt (CTRL+C).
        # Using our own global variable to indicate that we are running with "winpty" seems to be the most suitable
        # option, as os.environ['_'] contains "winpty" only when it is run manually from the console.
WINPTY_VAR = "WINPTY"
WINPTY_EXE = "winpty"
if ("MSYSTEM" in os.environ) and (not os.environ.get("_", "").endswith(WINPTY_EXE)
and WINPTY_VAR not in os.environ):
if 'menuconfig' in sys.argv:
# don't use winpty for menuconfig because it will print weird characters
main()
else:
os.environ[WINPTY_VAR] = "1" # the value is of no interest to us
# idf.py calls itself with "winpty" and WINPTY global variable set
ret = subprocess.call([WINPTY_EXE, sys.executable] + sys.argv, env=os.environ)
if ret:
raise SystemExit(ret)
elif os.name == "posix" and not _valid_unicode_config():
# Trying to find best utf-8 locale available on the system and restart python with it
best_locale = _find_usable_locale()
print_warning(
"Your environment is not configured to handle unicode filenames outside of ASCII range."
" Environment variable LC_ALL is temporary set to %s for unicode support." % best_locale)
os.environ["LC_ALL"] = best_locale
ret = subprocess.call([sys.executable] + sys.argv, env=os.environ)
if ret:
raise SystemExit(ret)
else:
main()
except FatalError as e:
print(e, file=sys.stderr)
sys.exit(2)
|
the-stack_106_16589
|
# -*- coding: utf-8 -*-
import json
import queue
import random
import time
import pytest
import requests
from botocore.exceptions import ClientError
from localstack import config
from localstack.config import external_service_url
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.http import Request
from localstack.services.generic_proxy import ProxyListener
from localstack.services.infra import start_proxy
from localstack.services.install import SQS_BACKEND_IMPL
from localstack.services.sns.provider import SNSBackend
from localstack.utils import testutil
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
get_free_tcp_port,
get_service_protocol,
retry,
short_uid,
to_str,
wait_for_port_open,
)
from localstack.utils.testutil import check_expected_lambda_log_events_length
from .awslambda.functions import lambda_integration
from .awslambda.test_lambda import (
LAMBDA_RUNTIME_PYTHON36,
TEST_LAMBDA_FUNCTION_PREFIX,
TEST_LAMBDA_LIBS,
TEST_LAMBDA_PYTHON,
TEST_LAMBDA_PYTHON_ECHO,
)
TEST_TOPIC_NAME = "TestTopic_snsTest"
TEST_QUEUE_NAME = "TestQueue_snsTest"
TEST_QUEUE_DLQ_NAME = "TestQueue_DLQ_snsTest"
TEST_TOPIC_NAME_2 = "topic-test-2"
PUBLICATION_TIMEOUT = 0.500
PUBLICATION_RETRIES = 4
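# With PUBLICATION_TIMEOUT = 0.5s and PUBLICATION_RETRIES = 4, each retry() below
# waits roughly two seconds in total for a published message to arrive.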
class TestSNSSubscription:
def test_python_lambda_subscribe_sns_topic(
self,
create_lambda_function,
sns_client,
lambda_su_role,
sns_topic,
logs_client,
lambda_client,
sqs_client,
sns_subscription,
):
function_name = f"{TEST_LAMBDA_FUNCTION_PREFIX}-{short_uid()}"
permission_id = f"test-statement-{short_uid()}"
subject = "[Subject] Test subject"
message = "Hello world."
topic_arn = sns_topic["Attributes"]["TopicArn"]
lambda_creation_response = create_lambda_function(
func_name=function_name,
handler_file=TEST_LAMBDA_PYTHON_ECHO,
runtime=LAMBDA_RUNTIME_PYTHON36,
role=lambda_su_role,
)
lambda_arn = lambda_creation_response["CreateFunctionResponse"]["FunctionArn"]
lambda_client.add_permission(
FunctionName=function_name,
StatementId=permission_id,
Action="lambda:InvokeFunction",
Principal="sns.amazonaws.com",
SourceArn=topic_arn,
)
sns_subscription(
TopicArn=topic_arn,
Protocol="lambda",
Endpoint=lambda_arn,
)
sns_client.publish(TopicArn=topic_arn, Subject=subject, Message=message)
events = retry(
check_expected_lambda_log_events_length,
retries=10,
sleep=1,
function_name=function_name,
expected_length=1,
regex_filter="Records.*Sns",
logs_client=logs_client,
)
notification = events[0]["Records"][0]["Sns"]
assert "Subject" in notification
assert subject == notification["Subject"]
class TestSNSProvider:
def test_publish_unicode_chars(
self,
sns_client,
sns_create_topic,
sqs_create_queue,
sqs_client,
sqs_queue_arn,
sns_subscription,
):
topic_arn = sns_create_topic()["TopicArn"]
queue_url = sqs_create_queue()
queue_arn = sqs_queue_arn(queue_url)
sns_subscription(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn)
# publish message to SNS, receive it from SQS, assert that messages are equal
message = 'ö§a1"_!?,. £$-'
sns_client.publish(TopicArn=topic_arn, Message=message)
def check_message():
msgs = sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0)
msg_received = msgs["Messages"][0]
msg_received = json.loads(to_str(msg_received["Body"]))
msg_received = msg_received["Message"]
assert message == msg_received
retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
def test_subscribe_http_endpoint(self, sns_client, sns_create_topic, sns_subscription):
topic_arn = sns_create_topic()["TopicArn"]
# create HTTP endpoint and connect it to SNS topic
class MyUpdateListener(ProxyListener):
def forward_request(self, method, path, data, headers):
records.append((json.loads(to_str(data)), headers))
return 200
records = []
local_port = get_free_tcp_port()
proxy = start_proxy(local_port, backend_url=None, update_listener=MyUpdateListener())
wait_for_port_open(local_port)
queue_arn = "%s://localhost:%s" % (get_service_protocol(), local_port)
sns_subscription(TopicArn=topic_arn, Protocol="http", Endpoint=queue_arn)
def received():
assert records[0][0]["Type"] == "SubscriptionConfirmation"
assert records[0][1]["x-amz-sns-message-type"] == "SubscriptionConfirmation"
token = records[0][0]["Token"]
subscribe_url = records[0][0]["SubscribeURL"]
assert subscribe_url == (
f"{external_service_url('sns')}/?Action=ConfirmSubscription&TopicArn={topic_arn}&Token={token}"
)
assert "Signature" in records[0][0]
assert "SigningCertURL" in records[0][0]
retry(received, retries=5, sleep=1)
proxy.stop()
def test_subscribe_with_invalid_protocol(self, sns_client, sns_create_topic, sns_subscription):
topic_arn = sns_create_topic(Name=TEST_TOPIC_NAME_2)["TopicArn"]
with pytest.raises(ClientError) as e:
sns_subscription(
TopicArn=topic_arn, Protocol="test-protocol", Endpoint="[email protected]"
)
assert e.value.response["ResponseMetadata"]["HTTPStatusCode"] == 400
assert e.value.response["Error"]["Code"] == "InvalidParameter"
def test_attribute_raw_subscribe(
self, sqs_client, sns_client, sns_create_topic, sqs_queue, sqs_queue_arn, sns_subscription
):
topic_arn = sns_create_topic()["TopicArn"]
# create SNS topic and connect it to an SQS queue
queue_url = sqs_queue
queue_arn = sqs_queue_arn(queue_url)
attributes = {"RawMessageDelivery": "True"}
sns_subscription(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=queue_arn,
Attributes=attributes,
)
# fetch subscription information
subscription_list = sns_client.list_subscriptions()
subscription_arn = ""
for subscription in subscription_list["Subscriptions"]:
if subscription["TopicArn"] == topic_arn:
subscription_arn = subscription["SubscriptionArn"]
actual_attributes = sns_client.get_subscription_attributes(
SubscriptionArn=subscription_arn
)["Attributes"]
# assert the attributes are well set
assert actual_attributes["RawMessageDelivery"]
# publish message to SNS, receive it from SQS, assert that messages are equal and that they are Raw
message = "This is a test message"
binary_attribute = b"\x02\x03\x04"
# extending this test case to test support for binary message attribute data
# https://github.com/localstack/localstack/issues/2432
sns_client.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={"store": {"DataType": "Binary", "BinaryValue": binary_attribute}},
)
def check_message():
msgs = sqs_client.receive_message(
QueueUrl=queue_url, MessageAttributeNames=["All"], VisibilityTimeout=0
)
msg_received = msgs["Messages"][0]
assert message == msg_received["Body"]
assert binary_attribute == msg_received["MessageAttributes"]["store"]["BinaryValue"]
retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
sns_client.unsubscribe(SubscriptionArn=subscription_arn)
def test_filter_policy(
self,
sqs_create_queue,
sqs_queue_arn,
sns_client,
sns_create_topic,
sqs_client,
sns_subscription,
):
# connect SNS topic to an SQS queue
queue_name = f"queue-{short_uid()}"
queue_url = sqs_create_queue(QueueName=queue_name)
queue_arn = sqs_queue_arn(queue_url)
topic_arn = sns_create_topic()["TopicArn"]
filter_policy = {"attr1": [{"numeric": [">", 0, "<=", 100]}]}
sns_subscription(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=queue_arn,
Attributes={"FilterPolicy": json.dumps(filter_policy)},
)
# get number of messages
num_msgs_0 = len(
sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0).get("Messages", [])
)
# publish message that satisfies the filter policy, assert that message is received
message = "This is a test message"
message_attributes = {"attr1": {"DataType": "Number", "StringValue": "99"}}
sns_client.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes=message_attributes,
)
def check_message():
msgs_1 = sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0)["Messages"]
num_msgs_1 = len(msgs_1)
assert num_msgs_1 == (num_msgs_0 + 1)
return num_msgs_1
num_msgs_1 = retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# publish message that does not satisfy the filter policy, assert that message is not received
message = "This is another test message"
sns_client.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "111"}},
)
def check_message2():
num_msgs_2 = len(
sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0)["Messages"]
)
assert num_msgs_2 == num_msgs_1
return num_msgs_2
retry(check_message2, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
def test_exists_filter_policy(
self,
sqs_create_queue,
sqs_queue_arn,
sns_create_topic,
sns_client,
sqs_client,
sns_subscription,
):
# connect SNS topic to an SQS queue
queue_name = f"queue-{short_uid()}"
queue_url = sqs_create_queue(QueueName=queue_name)
queue_arn = sqs_queue_arn(queue_url)
topic_arn = sns_create_topic()["TopicArn"]
filter_policy = {"store": [{"exists": True}]}
def do_subscribe(filter_policy, queue_arn):
sns_subscription(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=queue_arn,
Attributes={"FilterPolicy": json.dumps(filter_policy)},
)
do_subscribe(filter_policy, queue_arn)
# get number of messages
num_msgs_0 = len(
sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0).get("Messages", [])
)
# publish message that satisfies the filter policy, assert that message is received
message = f"message-{short_uid()}"
sns_client.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={
"store": {"DataType": "Number", "StringValue": "99"},
"def": {"DataType": "Number", "StringValue": "99"},
},
)
def check_message1():
num_msgs_1 = len(
sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0)["Messages"]
)
assert num_msgs_1 == (num_msgs_0 + 1)
return num_msgs_1
num_msgs_1 = retry(check_message1, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# publish message that does not satisfy the filter policy, assert that message is not received
message = f"message-{short_uid()}"
sns_client.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "111"}},
)
def check_message2():
num_msgs_2 = len(
sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0)["Messages"]
)
assert num_msgs_2 == num_msgs_1
return num_msgs_2
retry(check_message2, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
        # test with the exists operator set to False
queue_arn = aws_stack.sqs_queue_arn(TEST_QUEUE_NAME)
filter_policy = {"store": [{"exists": False}]}
do_subscribe(filter_policy, queue_arn)
# get number of messages
num_msgs_0 = len(sqs_client.receive_message(QueueUrl=queue_url).get("Messages", []))
        # publish a message with the attribute and see if it gets filtered out
message = f"message-{short_uid()}"
sns_client.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={
"store": {"DataType": "Number", "StringValue": "99"},
"def": {"DataType": "Number", "StringValue": "99"},
},
)
def check_message():
num_msgs_1 = len(
sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0).get(
"Messages", []
)
)
assert num_msgs_1 == num_msgs_0
return num_msgs_1
num_msgs_1 = retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
        # publish a message without the attribute and see if it gets filtered
message = f"message-{short_uid()}"
sns_client.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "111"}},
)
def check_message3():
num_msgs_2 = len(
sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0).get(
"Messages", []
)
)
assert num_msgs_2 == num_msgs_1
return num_msgs_2
retry(check_message3, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
def test_subscribe_sqs_queue(
self,
sqs_create_queue,
sqs_queue_arn,
sns_create_topic,
sns_client,
sqs_client,
sns_subscription,
):
# TODO: check with non default external port
# connect SNS topic to an SQS queue
queue_name = f"queue-{short_uid()}"
queue_url = sqs_create_queue(QueueName=queue_name)
queue_arn = sqs_queue_arn(queue_url)
topic_arn = sns_create_topic()["TopicArn"]
# create subscription with filter policy
filter_policy = {"attr1": [{"numeric": [">", 0, "<=", 100]}]}
subscription = sns_subscription(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=queue_arn,
Attributes={"FilterPolicy": json.dumps(filter_policy)},
)
# publish message that satisfies the filter policy
message = "This is a test message"
sns_client.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "99.12"}},
)
# assert that message is received
def check_message():
messages = sqs_client.receive_message(
QueueUrl=queue_url, VisibilityTimeout=0, MessageAttributeNames=["All"]
)["Messages"]
message = messages[0]
assert message["MessageAttributes"]["attr1"]["StringValue"] == "99.12"
retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# clean up
sns_client.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"])
def test_subscribe_platform_endpoint(
self, sns_client, sqs_create_queue, sns_create_topic, sns_subscription
):
sns_backend = SNSBackend.get()
topic_arn = sns_create_topic()["TopicArn"]
app_arn = sns_client.create_platform_application(Name="app1", Platform="p1", Attributes={})[
"PlatformApplicationArn"
]
platform_arn = sns_client.create_platform_endpoint(
PlatformApplicationArn=app_arn, Token="token_1"
)["EndpointArn"]
# create subscription with filter policy
filter_policy = {"attr1": [{"numeric": [">", 0, "<=", 100]}]}
subscription = sns_subscription(
TopicArn=topic_arn,
Protocol="application",
Endpoint=platform_arn,
Attributes={"FilterPolicy": json.dumps(filter_policy)},
)
# publish message that satisfies the filter policy
message = "This is a test message"
sns_client.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "99.12"}},
)
# assert that message has been received
def check_message():
assert len(sns_backend.platform_endpoint_messages[platform_arn]) > 0
retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# clean up
sns_client.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"])
sns_client.delete_endpoint(EndpointArn=platform_arn)
sns_client.delete_platform_application(PlatformApplicationArn=app_arn)
def test_unknown_topic_publish(self, sns_client):
fake_arn = "arn:aws:sns:us-east-1:123456789012:i_dont_exist"
message = "This is a test message"
with pytest.raises(ClientError) as e:
sns_client.publish(TopicArn=fake_arn, Message=message)
assert e.value.response["Error"]["Code"] == "NotFound"
assert e.value.response["Error"]["Message"] == "Topic does not exist"
assert e.value.response["ResponseMetadata"]["HTTPStatusCode"] == 404
def test_publish_sms(self, sns_client):
response = sns_client.publish(PhoneNumber="+33000000000", Message="This is a SMS")
assert "MessageId" in response
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
def test_publish_target(self, sns_client):
response = sns_client.publish(
TargetArn="arn:aws:sns:us-east-1:000000000000:endpoint/APNS/abcdef/0f7d5971-aa8b-4bd5-b585-0826e9f93a66",
Message="This is a push notification",
)
assert "MessageId" in response
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
def test_tags(self, sns_client, sns_create_topic):
topic_arn = sns_create_topic()["TopicArn"]
sns_client.tag_resource(
ResourceArn=topic_arn,
Tags=[
{"Key": "123", "Value": "abc"},
{"Key": "456", "Value": "def"},
{"Key": "456", "Value": "def"},
],
)
tags = sns_client.list_tags_for_resource(ResourceArn=topic_arn)
distinct_tags = [
tag for idx, tag in enumerate(tags["Tags"]) if tag not in tags["Tags"][:idx]
]
# test for duplicate tags
assert len(tags["Tags"]) == len(distinct_tags)
assert len(tags["Tags"]) == 2
assert tags["Tags"][0]["Key"] == "123"
assert tags["Tags"][0]["Value"] == "abc"
assert tags["Tags"][1]["Key"] == "456"
assert tags["Tags"][1]["Value"] == "def"
sns_client.untag_resource(ResourceArn=topic_arn, TagKeys=["123"])
tags = sns_client.list_tags_for_resource(ResourceArn=topic_arn)
assert len(tags["Tags"]) == 1
assert tags["Tags"][0]["Key"] == "456"
assert tags["Tags"][0]["Value"] == "def"
sns_client.tag_resource(ResourceArn=topic_arn, Tags=[{"Key": "456", "Value": "pqr"}])
tags = sns_client.list_tags_for_resource(ResourceArn=topic_arn)
assert len(tags["Tags"]) == 1
assert tags["Tags"][0]["Key"] == "456"
assert tags["Tags"][0]["Value"] == "pqr"
def test_topic_subscription(self, sns_client, sns_create_topic, sns_subscription):
topic_arn = sns_create_topic()["TopicArn"]
subscription = sns_subscription(
TopicArn=topic_arn,
Protocol="email",
Endpoint="[email protected]",
)
sns_backend = SNSBackend.get()
def check_subscription():
subscription_arn = subscription["SubscriptionArn"]
subscription_obj = sns_backend.subscription_status[subscription_arn]
assert subscription_obj["Status"] == "Not Subscribed"
_token = subscription_obj["Token"]
sns_client.confirm_subscription(TopicArn=topic_arn, Token=_token)
assert subscription_obj["Status"] == "Subscribed"
retry(check_subscription, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
def test_sqs_topic_subscription_confirmation(
self, sns_client, sns_create_topic, sqs_create_queue, sqs_queue_arn, sns_subscription
):
topic_arn = sns_create_topic()["TopicArn"]
queue_arn = sqs_queue_arn(sqs_create_queue())
subscription = sns_subscription(
TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn, ReturnSubscriptionArn=True
)
def check_subscription():
subscription_arn = subscription["SubscriptionArn"]
subscription_attrs = sns_client.get_subscription_attributes(
SubscriptionArn=subscription_arn
)
assert subscription_attrs["Attributes"]["PendingConfirmation"] == "false"
retry(check_subscription, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
def test_dead_letter_queue(
self,
sns_client,
sqs_client,
sns_create_topic,
sqs_create_queue,
sqs_queue_arn,
create_lambda_function,
sns_subscription,
):
lambda_name = f"test-{short_uid()}"
lambda_arn = aws_stack.lambda_function_arn(lambda_name)
topic_arn = sns_create_topic()["TopicArn"]
queue_name = f"test-{short_uid()}"
queue_url = sqs_create_queue(QueueName=queue_name)
queue_arn = sqs_queue_arn(queue_url)
create_lambda_function(
func_name=lambda_name,
handler_file=TEST_LAMBDA_PYTHON,
libs=TEST_LAMBDA_LIBS,
runtime=LAMBDA_RUNTIME_PYTHON36,
DeadLetterConfig={"TargetArn": queue_arn},
)
sns_subscription(TopicArn=topic_arn, Protocol="lambda", Endpoint=lambda_arn)
payload = {
lambda_integration.MSG_BODY_RAISE_ERROR_FLAG: 1,
}
sns_client.publish(TopicArn=topic_arn, Message=json.dumps(payload))
def receive_dlq():
result = sqs_client.receive_message(
QueueUrl=queue_url, MessageAttributeNames=["All"], VisibilityTimeout=0
)
            assert len(result["Messages"]) > 0
            msg_attrs = result["Messages"][0]["MessageAttributes"]
            assert "RequestID" in msg_attrs
            assert "ErrorCode" in msg_attrs
            assert "ErrorMessage" in msg_attrs
retry(receive_dlq, retries=8, sleep=2)
def test_redrive_policy_http_subscription(
self,
sns_client,
sns_create_topic,
sqs_client,
sqs_create_queue,
sqs_queue_arn,
sns_subscription,
):
# self.unsubscribe_all_from_sns()
dlq_name = f"dlq-{short_uid()}"
dlq_url = sqs_create_queue(QueueName=dlq_name)
dlq_arn = sqs_queue_arn(dlq_url)
topic_arn = sns_create_topic()["TopicArn"]
# create HTTP endpoint and connect it to SNS topic
class MyUpdateListener(ProxyListener):
def forward_request(self, method, path, data, headers):
records.append((json.loads(to_str(data)), headers))
return 200
records = []
local_port = get_free_tcp_port()
proxy = start_proxy(local_port, backend_url=None, update_listener=MyUpdateListener())
wait_for_port_open(local_port)
http_endpoint = f"{get_service_protocol()}://localhost:{local_port}"
subscription = sns_subscription(TopicArn=topic_arn, Protocol="http", Endpoint=http_endpoint)
sns_client.set_subscription_attributes(
SubscriptionArn=subscription["SubscriptionArn"],
AttributeName="RedrivePolicy",
AttributeValue=json.dumps({"deadLetterTargetArn": dlq_arn}),
)
proxy.stop()
# for some reason, it takes a long time to stop the proxy thread -> TODO investigate
time.sleep(5)
sns_client.publish(
TopicArn=topic_arn,
Message=json.dumps({"message": "test_redrive_policy"}),
)
def receive_dlq():
result = sqs_client.receive_message(QueueUrl=dlq_url, MessageAttributeNames=["All"])
assert len(result["Messages"]) > 0
assert (
json.loads(json.loads(result["Messages"][0]["Body"])["Message"][0])["message"]
== "test_redrive_policy"
)
retry(receive_dlq, retries=7, sleep=2.5)
def test_redrive_policy_lambda_subscription(
self,
sns_client,
sns_create_topic,
sqs_create_queue,
sqs_queue_arn,
create_lambda_function,
sqs_client,
sns_subscription,
):
# self.unsubscribe_all_from_sns()
dlq_name = f"dlq-{short_uid()}"
dlq_url = sqs_create_queue(QueueName=dlq_name)
dlq_arn = sqs_queue_arn(dlq_url)
topic_arn = sns_create_topic()["TopicArn"]
lambda_name = f"test-{short_uid()}"
lambda_arn = create_lambda_function(
func_name=lambda_name,
libs=TEST_LAMBDA_LIBS,
handler_file=TEST_LAMBDA_PYTHON,
runtime=LAMBDA_RUNTIME_PYTHON36,
)["CreateFunctionResponse"]["FunctionArn"]
subscription = sns_subscription(TopicArn=topic_arn, Protocol="lambda", Endpoint=lambda_arn)
sns_client.set_subscription_attributes(
SubscriptionArn=subscription["SubscriptionArn"],
AttributeName="RedrivePolicy",
AttributeValue=json.dumps({"deadLetterTargetArn": dlq_arn}),
)
testutil.delete_lambda_function(lambda_name)
sns_client.publish(
TopicArn=topic_arn,
Message=json.dumps({"message": "test_redrive_policy"}),
)
def receive_dlq():
result = sqs_client.receive_message(QueueUrl=dlq_url, MessageAttributeNames=["All"])
assert len(result["Messages"]) > 0
assert (
json.loads(json.loads(result["Messages"][0]["Body"])["Message"][0])["message"]
== "test_redrive_policy"
)
retry(receive_dlq, retries=10, sleep=2)
def test_redrive_policy_queue_subscription(
self,
sns_client,
sns_create_topic,
sqs_create_queue,
sqs_queue_arn,
sqs_client,
sns_subscription,
):
# self.unsubscribe_all_from_sns()
dlq_name = f"dlq-{short_uid()}"
dlq_url = sqs_create_queue(QueueName=dlq_name)
dlq_arn = sqs_queue_arn(dlq_url)
topic_arn = sns_create_topic()["TopicArn"]
invalid_queue_arn = aws_stack.sqs_queue_arn("invalid_queue")
# subscribe with an invalid queue ARN, to trigger event on DLQ below
subscription = sns_subscription(
TopicArn=topic_arn, Protocol="sqs", Endpoint=invalid_queue_arn
)
sns_client.set_subscription_attributes(
SubscriptionArn=subscription["SubscriptionArn"],
AttributeName="RedrivePolicy",
AttributeValue=json.dumps({"deadLetterTargetArn": dlq_arn}),
)
sns_client.publish(
TopicArn=topic_arn, Message=json.dumps({"message": "test_redrive_policy"})
)
def receive_dlq():
result = sqs_client.receive_message(QueueUrl=dlq_url, MessageAttributeNames=["All"])
assert len(result["Messages"]) > 0
assert (
json.loads(json.loads(result["Messages"][0]["Body"])["Message"][0])["message"]
== "test_redrive_policy"
)
retry(receive_dlq, retries=10, sleep=2)
def test_publish_with_empty_subject(self, sns_client, sns_create_topic):
topic_arn = sns_create_topic()["TopicArn"]
# Publish without subject
rs = sns_client.publish(TopicArn=topic_arn, Message=json.dumps({"message": "test_publish"}))
assert rs["ResponseMetadata"]["HTTPStatusCode"] == 200
with pytest.raises(ClientError) as e:
sns_client.publish(
TopicArn=topic_arn,
Subject="",
Message=json.dumps({"message": "test_publish"}),
)
assert e.value.response["Error"]["Code"] == "InvalidParameter"
def test_create_topic_test_arn(self, sns_create_topic, sns_client):
topic_name = f"topic-{short_uid()}"
response = sns_create_topic(Name=topic_name)
topic_arn_params = response["TopicArn"].split(":")
testutil.response_arn_matches_partition(sns_client, response["TopicArn"])
assert topic_arn_params[4] == TEST_AWS_ACCOUNT_ID
assert topic_arn_params[5] == topic_name
def test_publish_message_by_target_arn(
self, sns_client, sns_create_topic, create_lambda_function, sns_subscription
):
# self.unsubscribe_all_from_sns()
func_name = f"lambda-{short_uid()}"
topic_arn = sns_create_topic()["TopicArn"]
lambda_arn = create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=func_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)["CreateFunctionResponse"]["FunctionArn"]
subscription_arn = sns_subscription(
TopicArn=topic_arn, Protocol="lambda", Endpoint=lambda_arn
)["SubscriptionArn"]
sns_client.publish(TopicArn=topic_arn, Message="test_message_1", Subject="test subject")
# Lambda invoked 1 time
events = retry(
check_expected_lambda_log_events_length,
retries=3,
sleep=1,
function_name=func_name,
expected_length=1,
)
message = events[0]["Records"][0]
assert message["EventSubscriptionArn"] == subscription_arn
sns_client.publish(TargetArn=topic_arn, Message="test_message_2", Subject="test subject")
events = retry(
check_expected_lambda_log_events_length,
retries=3,
sleep=1,
function_name=func_name,
expected_length=2,
)
# Lambda invoked 1 more time
assert len(events) == 2
for event in events:
message = event["Records"][0]
assert message["EventSubscriptionArn"] == subscription_arn
def test_publish_message_after_subscribe_topic(
self,
sns_client,
sns_create_topic,
sqs_client,
sqs_create_queue,
sqs_queue_arn,
sns_subscription,
):
# self.unsubscribe_all_from_sns()
topic_arn = sns_create_topic()["TopicArn"]
queue_url = sqs_create_queue()
queue_arn = sqs_queue_arn(queue_url)
rs = sns_client.publish(
TopicArn=topic_arn, Subject="test subject", Message="test_message_1"
)
assert rs["ResponseMetadata"]["HTTPStatusCode"] == 200
sns_subscription(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn)
message_subject = "sqs subject"
message_body = "test_message_2"
rs = sns_client.publish(TopicArn=topic_arn, Subject=message_subject, Message=message_body)
# time.sleep(100)
assert rs["ResponseMetadata"]["HTTPStatusCode"] == 200
message_id = rs["MessageId"]
def get_message(q_url):
resp = sqs_client.receive_message(QueueUrl=q_url, VisibilityTimeout=0)
return json.loads(resp["Messages"][0]["Body"])
message = retry(get_message, retries=3, sleep=2, q_url=queue_url)
assert message["MessageId"] == message_id
assert message["Subject"] == message_subject
assert message["Message"] == message_body
def test_create_duplicate_topic_with_more_tags(self, sns_client, sns_create_topic):
topic_name = f"test-{short_uid()}"
sns_create_topic(Name=topic_name)
with pytest.raises(ClientError) as e:
sns_client.create_topic(Name=topic_name, Tags=[{"Key": "456", "Value": "pqr"}])
assert e.value.response["Error"]["Code"] == "InvalidParameter"
assert e.value.response["Error"]["Message"] == "Topic already exists with different tags"
assert e.value.response["ResponseMetadata"]["HTTPStatusCode"] == 400
def test_create_duplicate_topic_check_idempotentness(self, sns_create_topic):
topic_name = f"test-{short_uid()}"
tags = [{"Key": "a", "Value": "1"}, {"Key": "b", "Value": "2"}]
kwargs = [
{"Tags": tags}, # to create topic with two tags
{"Tags": tags}, # to create the same topic again with same tags
{"Tags": [tags[0]]}, # to create the same topic again with one of the tags from above
{"Tags": []}, # to create the same topic again with no tags
]
responses = []
for arg in kwargs:
responses.append(sns_create_topic(Name=topic_name, **arg))
# assert TopicArn is returned by all the above create_topic calls
for i in range(len(responses)):
assert "TopicArn" in responses[i]
def test_create_platform_endpoint_check_idempotentness(self, sns_client):
response = sns_client.create_platform_application(
Name=f"test-{short_uid()}",
Platform="GCM",
Attributes={"PlatformCredential": "123"},
)
kwargs_list = [
{"Token": "test1", "CustomUserData": "test-data"},
{"Token": "test1", "CustomUserData": "test-data"},
{"Token": "test1"},
{"Token": "test1"},
]
platform_arn = response["PlatformApplicationArn"]
responses = []
for kwargs in kwargs_list:
responses.append(
sns_client.create_platform_endpoint(PlatformApplicationArn=platform_arn, **kwargs)
)
        # assert that an EndpointArn is returned by every create_platform_endpoint call
for i in range(len(responses)):
assert "EndpointArn" in responses[i]
endpoint_arn = responses[0]["EndpointArn"]
# clean up
sns_client.delete_endpoint(EndpointArn=endpoint_arn)
sns_client.delete_platform_application(PlatformApplicationArn=platform_arn)
def test_publish_by_path_parameters(
self,
sns_create_topic,
sns_client,
sqs_client,
sqs_create_queue,
sqs_queue_arn,
sns_subscription,
):
topic_name = f"topic-{short_uid()}"
queue_name = f"queue-{short_uid()}"
message = f"test message {short_uid()}"
topic_arn = sns_create_topic(Name=topic_name)["TopicArn"]
base_url = (
f"{get_service_protocol()}://{config.LOCALSTACK_HOSTNAME}:{config.service_port('sns')}"
)
path = "Action=Publish&Version=2010-03-31&TopicArn={}&Message={}".format(topic_arn, message)
queue_url = sqs_create_queue(QueueName=queue_name)
queue_arn = sqs_queue_arn(queue_url)
subscription_arn = sns_subscription(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn)[
"SubscriptionArn"
]
r = requests.post(
url="{}/?{}".format(base_url, path),
headers=aws_stack.mock_aws_request_headers("sns"),
)
assert r.status_code == 200
def get_notification(q_url):
resp = sqs_client.receive_message(QueueUrl=q_url)
return json.loads(resp["Messages"][0]["Body"])
notification = retry(get_notification, retries=3, sleep=2, q_url=queue_url)
assert notification["TopicArn"] == topic_arn
assert notification["Message"] == message
sns_client.unsubscribe(SubscriptionArn=subscription_arn)
def test_multiple_subscriptions_http_endpoint(
self, sns_client, sns_create_topic, sns_subscription
):
# create a topic
topic_arn = sns_create_topic()["TopicArn"]
# build fake http server endpoints
_requests = queue.Queue()
# create HTTP endpoint and connect it to SNS topic
class MyUpdateListener(ProxyListener):
def forward_request(self, method, path, data, headers):
_requests.put(Request(method, path, headers=headers, body=data))
return 429
number_of_endpoints = 4
proxies = []
for _ in range(number_of_endpoints):
local_port = get_free_tcp_port()
proxies.append(
start_proxy(local_port, backend_url=None, update_listener=MyUpdateListener())
)
wait_for_port_open(local_port)
http_endpoint = f"{get_service_protocol()}://localhost:{local_port}"
sns_subscription(TopicArn=topic_arn, Protocol="http", Endpoint=http_endpoint)
# fetch subscription information
subscription_list = sns_client.list_subscriptions_by_topic(TopicArn=topic_arn)
assert subscription_list["ResponseMetadata"]["HTTPStatusCode"] == 200
assert (
len(subscription_list["Subscriptions"]) == number_of_endpoints
), f"unexpected number of subscriptions {subscription_list}"
for _ in range(number_of_endpoints):
request = _requests.get(timeout=2)
assert request.get_json(True)["TopicArn"] == topic_arn
with pytest.raises(queue.Empty):
# make sure only four requests are received
_requests.get(timeout=1)
for proxy in proxies:
proxy.stop()
def test_publish_sms_endpoint(self, sns_client, sns_create_topic, sns_subscription):
list_of_contacts = [
f"+{random.randint(100000000, 9999999999)}",
f"+{random.randint(100000000, 9999999999)}",
f"+{random.randint(100000000, 9999999999)}",
]
message = "Good news everyone!"
topic_arn = sns_create_topic()["TopicArn"]
for number in list_of_contacts:
sns_subscription(TopicArn=topic_arn, Protocol="sms", Endpoint=number)
sns_client.publish(Message=message, TopicArn=topic_arn)
sns_backend = SNSBackend.get()
def check_messages():
sms_messages = sns_backend.sms_messages
for contact in list_of_contacts:
sms_was_found = False
for message in sms_messages:
if message["endpoint"] == contact:
sms_was_found = True
break
assert sms_was_found
retry(check_messages, sleep=0.5)
def test_publish_sqs_from_sns(
self,
sns_client,
sns_create_topic,
sqs_client,
sqs_create_queue,
sqs_queue_arn,
sns_subscription,
):
topic_arn = sns_create_topic()["TopicArn"]
queue_url = sqs_create_queue()
queue_arn = sqs_queue_arn(queue_url)
subscription_arn = sns_subscription(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=queue_arn,
Attributes={"RawMessageDelivery": "true"},
)["SubscriptionArn"]
string_value = "99.12"
sns_client.publish(
TopicArn=topic_arn,
Message="Test msg",
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": string_value}},
)
def get_message_with_attributes(queue_url):
response = sqs_client.receive_message(
QueueUrl=queue_url, MessageAttributeNames=["All"], VisibilityTimeout=0
)
assert response["Messages"][0]["MessageAttributes"] == {
"attr1": {"DataType": "Number", "StringValue": string_value}
}
sqs_client.delete_message(
QueueUrl=queue_url, ReceiptHandle=response["Messages"][0]["ReceiptHandle"]
)
retry(get_message_with_attributes, retries=3, sleep=3, queue_url=queue_url)
sns_client.set_subscription_attributes(
SubscriptionArn=subscription_arn,
AttributeName="RawMessageDelivery",
AttributeValue="false",
)
string_value = "100.12"
sns_client.publish(
TargetArn=topic_arn,
Message="Test msg",
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": string_value}},
)
retry(get_message_with_attributes, retries=3, sleep=3, queue_url=queue_url)
def test_publish_batch_messages_from_sns_to_sqs(
self,
sns_client,
sns_create_topic,
sqs_create_queue,
sqs_queue_arn,
sqs_client,
sns_subscription,
):
topic_arn = sns_create_topic()["TopicArn"]
queue_url = sqs_create_queue()
queue_arn = sqs_queue_arn(queue_url)
sns_subscription(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=queue_arn,
Attributes={"RawMessageDelivery": "true"},
)
publish_batch_response = sns_client.publish_batch(
TopicArn=topic_arn,
PublishBatchRequestEntries=[
{
"Id": "1",
"Message": "Test Message with two attributes",
"Subject": "Subject",
"MessageAttributes": {
"attr1": {"DataType": "Number", "StringValue": "99.12"},
"attr2": {"DataType": "Number", "StringValue": "109.12"},
},
},
{
"Id": "2",
"Message": "Test Message with one attribute",
"Subject": "Subject",
"MessageAttributes": {"attr1": {"DataType": "Number", "StringValue": "19.12"}},
},
{
"Id": "3",
"Message": "Test Message without attribute",
"Subject": "Subject",
},
{
"Id": "4",
"Message": "Test Message without subject",
},
],
)
assert "Successful" in publish_batch_response
assert "Failed" in publish_batch_response
for successful_resp in publish_batch_response["Successful"]:
assert "Id" in successful_resp
assert "MessageId" in successful_resp
def get_messages(queue_url):
response = sqs_client.receive_message(
QueueUrl=queue_url,
MessageAttributeNames=["All"],
MaxNumberOfMessages=10,
VisibilityTimeout=0,
)
assert len(response["Messages"]) == 4
for message in response["Messages"]:
assert "Body" in message
if message["Body"] == "Test Message with two attributes":
assert len(message["MessageAttributes"]) == 2
assert message["MessageAttributes"]["attr1"] == {
"StringValue": "99.12",
"DataType": "Number",
}
assert message["MessageAttributes"]["attr2"] == {
"StringValue": "109.12",
"DataType": "Number",
}
elif message["Body"] == "Test Message with one attribute":
assert len(message["MessageAttributes"]) == 1
assert message["MessageAttributes"]["attr1"] == {
"StringValue": "19.12",
"DataType": "Number",
}
elif message["Body"] == "Test Message without attribute":
assert message.get("MessageAttributes") is None
retry(get_messages, retries=5, sleep=1, queue_url=queue_url)
def test_publish_batch_messages_from_fifo_topic_to_fifo_queue(
self, sns_client, sns_create_topic, sqs_client, sqs_create_queue, sns_subscription
):
topic_name = f"topic-{short_uid()}.fifo"
queue_name = f"queue-{short_uid()}.fifo"
topic_arn = sns_create_topic(Name=topic_name, Attributes={"FifoTopic": "true"})["TopicArn"]
queue_url = sqs_create_queue(
QueueName=queue_name,
Attributes={"FifoQueue": "true"},
)
sns_subscription(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=queue_url,
Attributes={"RawMessageDelivery": "true"},
)
message_group_id = "complexMessageGroupId"
publish_batch_response = sns_client.publish_batch(
TopicArn=topic_arn,
PublishBatchRequestEntries=[
{
"Id": "1",
"MessageGroupId": message_group_id,
"Message": "Test Message with two attributes",
"Subject": "Subject",
"MessageAttributes": {
"attr1": {"DataType": "Number", "StringValue": "99.12"},
"attr2": {"DataType": "Number", "StringValue": "109.12"},
},
},
{
"Id": "2",
"MessageGroupId": message_group_id,
"Message": "Test Message with one attribute",
"Subject": "Subject",
"MessageAttributes": {"attr1": {"DataType": "Number", "StringValue": "19.12"}},
},
{
"Id": "3",
"MessageGroupId": message_group_id,
"Message": "Test Message without attribute",
"Subject": "Subject",
},
],
)
assert "Successful" in publish_batch_response
assert "Failed" in publish_batch_response
for successful_resp in publish_batch_response["Successful"]:
assert "Id" in successful_resp
assert "MessageId" in successful_resp
def get_messages(queue_url):
response = sqs_client.receive_message(
QueueUrl=queue_url,
MessageAttributeNames=["All"],
AttributeNames=["All"],
MaxNumberOfMessages=10,
VisibilityTimeout=0,
)
assert len(response["Messages"]) == 3
for message in response["Messages"]:
assert "Body" in message
assert message["Attributes"]["MessageGroupId"] == message_group_id
if message["Body"] == "Test Message with two attributes":
assert len(message["MessageAttributes"]) == 2
assert message["MessageAttributes"]["attr1"] == {
"StringValue": "99.12",
"DataType": "Number",
}
assert message["MessageAttributes"]["attr2"] == {
"StringValue": "109.12",
"DataType": "Number",
}
elif message["Body"] == "Test Message with one attribute":
assert len(message["MessageAttributes"]) == 1
assert message["MessageAttributes"]["attr1"] == {
"StringValue": "19.12",
"DataType": "Number",
}
elif message["Body"] == "Test Message without attribute":
assert message.get("MessageAttributes") is None
retry(get_messages, retries=5, sleep=1, queue_url=queue_url)
def test_publish_batch_exceptions(
self, sns_client, sqs_client, sns_create_topic, sqs_create_queue, sns_subscription
):
topic_name = f"topic-{short_uid()}.fifo"
queue_name = f"queue-{short_uid()}.fifo"
topic_arn = sns_create_topic(Name=topic_name, Attributes={"FifoTopic": "true"})["TopicArn"]
queue_url = sqs_create_queue(
QueueName=queue_name,
Attributes={"FifoQueue": "true"},
)
queue_arn = aws_stack.sqs_queue_arn(queue_url)
sns_subscription(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=queue_arn,
Attributes={"RawMessageDelivery": "true"},
)
with pytest.raises(ClientError) as e:
sns_client.publish_batch(
TopicArn=topic_arn,
PublishBatchRequestEntries=[
{
"Id": "1",
"Message": "Test Message with two attributes",
}
],
)
assert e.value.response["Error"]["Code"] == "InvalidParameter"
assert e.value.response["ResponseMetadata"]["HTTPStatusCode"] == 400
with pytest.raises(ClientError) as e:
sns_client.publish_batch(
TopicArn=topic_arn,
PublishBatchRequestEntries=[
{"Id": f"Id_{i}", "Message": f"message_{i}"} for i in range(11)
],
)
assert e.value.response["Error"]["Code"] == "TooManyEntriesInBatchRequest"
assert e.value.response["ResponseMetadata"]["HTTPStatusCode"] == 400
with pytest.raises(ClientError) as e:
sns_client.publish_batch(
TopicArn=topic_arn,
PublishBatchRequestEntries=[
{"Id": "1", "Message": f"message_{i}"} for i in range(2)
],
)
assert e.value.response["Error"]["Code"] == "BatchEntryIdsNotDistinct"
assert e.value.response["ResponseMetadata"]["HTTPStatusCode"] == 400
def add_xray_header(self, request, **kwargs):
request.headers[
"X-Amzn-Trace-Id"
] = "Root=1-3152b799-8954dae64eda91bc9a23a7e8;Parent=7fa8c0f79203be72;Sampled=1"
def test_publish_sqs_from_sns_with_xray_propagation(
self, sns_client, sns_create_topic, sqs_client, sqs_create_queue, sns_subscription
):
# TODO: remove or adapt for asf
if SQS_BACKEND_IMPL != "elasticmq":
pytest.skip("not using elasticmq as SQS backend")
sns_client.meta.events.register("before-send.sns.Publish", self.add_xray_header)
topic = sns_create_topic()
topic_arn = topic["TopicArn"]
queue_url = sqs_create_queue()
sns_subscription(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_url)
sns_client.publish(TargetArn=topic_arn, Message="X-Ray propagation test msg")
response = sqs_client.receive_message(
QueueUrl=queue_url,
AttributeNames=["SentTimestamp", "AWSTraceHeader"],
MaxNumberOfMessages=1,
MessageAttributeNames=["All"],
VisibilityTimeout=2,
WaitTimeSeconds=2,
)
assert len(response["Messages"]) == 1
message = response["Messages"][0]
assert "Attributes" in message
assert "AWSTraceHeader" in message["Attributes"]
assert (
message["Attributes"]["AWSTraceHeader"]
== "Root=1-3152b799-8954dae64eda91bc9a23a7e8;Parent=7fa8c0f79203be72;Sampled=1"
)
def test_create_topic_after_delete_with_new_tags(self, sns_create_topic, sns_client):
topic_name = f"test-{short_uid()}"
topic = sns_create_topic(Name=topic_name, Tags=[{"Key": "Name", "Value": "pqr"}])
sns_client.delete_topic(TopicArn=topic["TopicArn"])
topic1 = sns_create_topic(Name=topic_name, Tags=[{"Key": "Name", "Value": "abc"}])
assert topic["TopicArn"] == topic1["TopicArn"]
def test_not_found_error_on_get_subscription_attributes(
self, sns_client, sns_create_topic, sqs_create_queue, sqs_queue_arn, sns_subscription
):
topic_arn = sns_create_topic()["TopicArn"]
queue_url = sqs_create_queue()
queue_arn = sqs_queue_arn(queue_url)
subscription = sns_subscription(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn)
subscription_attributes = sns_client.get_subscription_attributes(
SubscriptionArn=subscription["SubscriptionArn"]
)
assert (
subscription_attributes.get("Attributes").get("SubscriptionArn")
== subscription["SubscriptionArn"]
)
sns_client.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"])
with pytest.raises(ClientError) as e:
sns_client.get_subscription_attributes(SubscriptionArn=subscription["SubscriptionArn"])
assert e.value.response["Error"]["Code"] == "NotFound"
assert e.value.response["ResponseMetadata"]["HTTPStatusCode"] == 404
def test_message_to_fifo_sqs(
self,
sns_client,
sqs_client,
sns_create_topic,
sqs_create_queue,
sqs_queue_arn,
sns_subscription,
):
topic_name = f"topic-{short_uid()}.fifo"
queue_name = f"queue-{short_uid()}.fifo"
topic_arn = sns_create_topic(Name=topic_name, Attributes={"FifoTopic": "true"})["TopicArn"]
queue_url = sqs_create_queue(
QueueName=queue_name,
Attributes={"FifoQueue": "true"},
)
queue_arn = sqs_queue_arn(queue_url)
sns_subscription(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn)
message = "Test"
sns_client.publish(TopicArn=topic_arn, Message=message, MessageGroupId=short_uid())
def get_message():
received = sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0)[
"Messages"
][0]["Body"]
assert json.loads(received)["Message"] == message
retry(get_message, retries=10, sleep_before=0.15, sleep=1)
def test_validations_for_fifo(
self,
sns_client,
sqs_client,
sns_create_topic,
sqs_create_queue,
sqs_queue_arn,
sns_subscription,
):
topic_name = f"topic-{short_uid()}"
fifo_topic_name = f"topic-{short_uid()}.fifo"
fifo_queue_name = f"queue-{short_uid()}.fifo"
topic_arn = sns_create_topic(Name=topic_name)["TopicArn"]
fifo_topic_arn = sns_create_topic(Name=fifo_topic_name, Attributes={"FifoTopic": "true"})[
"TopicArn"
]
fifo_queue_url = sqs_create_queue(
QueueName=fifo_queue_name, Attributes={"FifoQueue": "true"}
)
fifo_queue_arn = sqs_queue_arn(fifo_queue_url)
with pytest.raises(ClientError) as e:
sns_subscription(TopicArn=topic_arn, Protocol="sqs", Endpoint=fifo_queue_arn)
assert e.match("standard SNS topic")
with pytest.raises(ClientError) as e:
sns_client.publish(TopicArn=fifo_topic_arn, Message="test")
assert e.match("MessageGroupId")
def test_empty_sns_message(
self, sns_client, sqs_client, sns_topic, sqs_queue, sqs_queue_arn, sns_subscription
):
topic_arn = sns_topic["Attributes"]["TopicArn"]
queue_arn = sqs_queue_arn(sqs_queue)
sns_subscription(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn)
with pytest.raises(ClientError) as e:
sns_client.publish(Message="", TopicArn=topic_arn)
assert e.match("Empty message")
assert (
sqs_client.get_queue_attributes(
QueueUrl=sqs_queue, AttributeNames=["ApproximateNumberOfMessages"]
)["Attributes"]["ApproximateNumberOfMessages"]
== "0"
)
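# Illustrative sketch (not part of the original test suite): a simplified matcher for the
# kinds of filter policies exercised above, e.g. {"attr1": [{"numeric": [">", 0, "<=", 100]}]}
# or {"store": [{"exists": True}]}. The helper name `matches_filter_policy` is hypothetical
# and only approximates the real SNS matching semantics.
def matches_filter_policy(filter_policy: dict, message_attributes: dict) -> bool:
    ops = {
        ">": lambda a, b: a > b,
        ">=": lambda a, b: a >= b,
        "<": lambda a, b: a < b,
        "<=": lambda a, b: a <= b,
        "=": lambda a, b: a == b,
    }
    for attr_name, conditions in filter_policy.items():
        attr = message_attributes.get(attr_name)
        matched = False
        for condition in conditions:
            if isinstance(condition, dict) and "exists" in condition:
                matched = (attr is not None) == condition["exists"]
            elif isinstance(condition, dict) and "numeric" in condition and attr is not None:
                value = float(attr["StringValue"])
                clauses = condition["numeric"]
                # clauses come as (operator, operand) pairs, e.g. [">", 0, "<=", 100]
                matched = all(
                    ops[clauses[i]](value, clauses[i + 1]) for i in range(0, len(clauses), 2)
                )
            elif attr is not None:
                # plain string condition, e.g. {"attr1": ["value1"]}
                matched = attr.get("StringValue") == condition
            if matched:
                break
        if not matched:
            return False
    return True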
|
the-stack_106_16590
|
import random
import logging
import torch
from classla.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from classla.models.common.vocab import PAD_ID, VOCAB_PREFIX
from classla.models.pos.vocab import CharVocab, WordVocab
from classla.models.ner.vocab import TagVocab, MultiVocab
from classla.models.common.doc import Document, TEXT, NER
# Document class for conllu pipeline differs from the one in common.doc.
# TODO: unify classes?
from classla.pipeline.doc import Document as PipelineDocument
from classla.models.ner.utils import is_bio_scheme, to_bio2, bio2_to_bioes
class DataLoader:
def __init__(self, doc, batch_size, args, pretrain=None, vocab=None, evaluation=False, preprocess_tags=True):
self.batch_size = batch_size
self.args = args
self.eval = evaluation
self.shuffled = not self.eval
self.preprocess_tags = preprocess_tags
self.doc = doc
self.conll = None
if isinstance(self.doc, PipelineDocument):
self.conll = doc.conll_file
data = self.load_doc(self.doc)
self.tags = [[w[1] for w in sent] for sent in data]
# handle vocab
self.pretrain = pretrain
if vocab is None:
self.vocab = self.init_vocab(data)
else:
self.vocab = vocab
# filter and sample data
if args.get('sample_train', 1.0) < 1.0 and not self.eval:
keep = int(args['sample_train'] * len(data))
data = random.sample(data, keep)
data = self.preprocess(data, self.vocab, args)
# shuffle for training
if self.shuffled:
random.shuffle(data)
self.num_examples = len(data)
# chunk into batches
self.data = self.chunk_batches(data)
def init_vocab(self, data):
def from_model(model_filename):
""" Try loading vocab from charLM model file. """
state_dict = torch.load(model_filename, lambda storage, loc: storage)
assert 'vocab' in state_dict, "Cannot find vocab in charLM model file."
return state_dict['vocab']
if self.eval:
raise Exception("Vocab must exist for evaluation.")
if self.args['charlm']:
charvocab = CharVocab.load_state_dict(from_model(self.args['charlm_forward_file']))
else:
charvocab = CharVocab(data, self.args['shorthand'])
wordvocab = self.pretrain.vocab
tagvocab = TagVocab(data, self.args['shorthand'], idx=1)
vocab = MultiVocab({'char': charvocab,
'word': wordvocab,
'tag': tagvocab})
return vocab
def preprocess(self, data, vocab, args):
processed = []
if args.get('lowercase', True): # handle word case
case = lambda x: x.lower()
else:
case = lambda x: x
if args.get('char_lowercase', False): # handle character case
char_case = lambda x: x.lower()
else:
char_case = lambda x: x
for sent in data:
processed_sent = [vocab['word'].map([case(w[0]) for w in sent])]
processed_sent += [[vocab['char'].map([char_case(x) for x in w[0]]) for w in sent]]
processed_sent += [vocab['tag'].map([w[1] for w in sent])]
processed.append(processed_sent)
return processed
def __len__(self):
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
batch = list(zip(*batch))
assert len(batch) == 3 # words: List[List[int]], chars: List[List[List[int]]], tags: List[List[int]]
# sort sentences by lens for easy RNN operations
sentlens = [len(x) for x in batch[0]]
batch, orig_idx = sort_all(batch, sentlens)
sentlens = [len(x) for x in batch[0]]
# sort chars by lens for easy char-LM operations
chars_forward, chars_backward, charoffsets_forward, charoffsets_backward, charlens = self.process_chars(batch[1])
chars_sorted, char_orig_idx = sort_all([chars_forward, chars_backward, charoffsets_forward, charoffsets_backward], charlens)
chars_forward, chars_backward, charoffsets_forward, charoffsets_backward = chars_sorted
charlens = [len(sent) for sent in chars_forward]
# sort words by lens for easy char-RNN operations
batch_words = [w for sent in batch[1] for w in sent]
wordlens = [len(x) for x in batch_words]
batch_words, word_orig_idx = sort_all([batch_words], wordlens)
batch_words = batch_words[0]
wordlens = [len(x) for x in batch_words]
# convert to tensors
words = get_long_tensor(batch[0], batch_size)
words_mask = torch.eq(words, PAD_ID)
wordchars = get_long_tensor(batch_words, len(wordlens))
wordchars_mask = torch.eq(wordchars, PAD_ID)
chars_forward = get_long_tensor(chars_forward, batch_size, pad_id=self.vocab['char'].unit2id(' '))
chars_backward = get_long_tensor(chars_backward, batch_size, pad_id=self.vocab['char'].unit2id(' '))
chars = torch.cat([chars_forward.unsqueeze(0), chars_backward.unsqueeze(0)]) # padded forward and backward char idx
charoffsets = [charoffsets_forward, charoffsets_backward] # idx for forward and backward lm to get word representation
tags = get_long_tensor(batch[2], batch_size)
return words, words_mask, wordchars, wordchars_mask, chars, tags, orig_idx, word_orig_idx, char_orig_idx, sentlens, wordlens, charlens, charoffsets
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def load_doc(self, doc):
if isinstance(doc, PipelineDocument):
data = doc.conll_file.get(['word', 'misc'], as_sentences=True)
else:
data = doc.get([TEXT, NER], as_sentences=True, from_token=True)
if self.preprocess_tags: # preprocess tags
data = self.process_tags(data)
return data
def process_tags(self, sentences):
res = []
# check if tag conversion is needed
convert_to_bioes = False
is_bio = is_bio_scheme([x[1] for sent in sentences for x in sent])
if is_bio and self.args.get('scheme', 'bio').lower() == 'bioes':
convert_to_bioes = True
# process tags
for sent in sentences:
words, tags = zip(*sent)
# NER field sanity checking
if any([x is None or x == '_' for x in tags]):
raise Exception("NER tag not found for some input data.")
# first ensure BIO2 scheme
tags = to_bio2(tags)
# then convert to BIOES
if convert_to_bioes:
tags = bio2_to_bioes(tags)
res.append([[w,t] for w,t in zip(words, tags)])
return res
def process_chars(self, sents):
        start_id, end_id = self.vocab['char'].unit2id('\n'), self.vocab['char'].unit2id(' ') # special start/end tokens
start_offset, end_offset = 1, 1
chars_forward, chars_backward, charoffsets_forward, charoffsets_backward = [], [], [], []
# get char representation for each sentence
for sent in sents:
chars_forward_sent, chars_backward_sent, charoffsets_forward_sent, charoffsets_backward_sent = [start_id], [start_id], [], []
# forward lm
for word in sent:
chars_forward_sent += word
                charoffsets_forward_sent = charoffsets_forward_sent + [len(chars_forward_sent)] # append each token's end offset for the forward lm
chars_forward_sent += [end_id]
# backward lm
for word in sent[::-1]:
chars_backward_sent += word[::-1]
                charoffsets_backward_sent = [len(chars_backward_sent)] + charoffsets_backward_sent # prepend each token's end offset for the backward lm
chars_backward_sent += [end_id]
# store each sentence
chars_forward.append(chars_forward_sent)
chars_backward.append(chars_backward_sent)
charoffsets_forward.append(charoffsets_forward_sent)
charoffsets_backward.append(charoffsets_backward_sent)
charlens = [len(sent) for sent in chars_forward] # forward lm and backward lm should have the same lengths
return chars_forward, chars_backward, charoffsets_forward, charoffsets_backward, charlens
def reshuffle(self):
data = [y for x in self.data for y in x]
random.shuffle(data)
self.data = self.chunk_batches(data)
def chunk_batches(self, data):
data = [data[i:i+self.batch_size] for i in range(0, len(data), self.batch_size)]
return data
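# Illustrative sketch (not part of the original module): mimics the sequence layout that
# `process_chars` builds, using raw characters instead of vocab ids, to show how the
# forward/backward character streams and the per-token end offsets line up for one sentence.
def _toy_char_layout(sentence):
    start, end = '\n', ' '
    forward, offsets_forward = [start], []
    for word in sentence:
        forward += list(word)
        offsets_forward.append(len(forward))  # index just past the last char of the token
        forward += [end]
    backward, offsets_backward = [start], []
    for word in sentence[::-1]:
        backward += list(word)[::-1]
        offsets_backward = [len(backward)] + offsets_backward
        backward += [end]
    return forward, offsets_forward, backward, offsets_backward
if __name__ == '__main__':
    fwd, off_f, bwd, off_b = _toy_char_layout(["the", "cat"])
    # fwd == ['\n', 't', 'h', 'e', ' ', 'c', 'a', 't', ' '], off_f == [4, 8]
    # bwd == ['\n', 't', 'a', 'c', ' ', 'e', 'h', 't', ' '], off_b == [8, 4]
    print(fwd, off_f, bwd, off_b)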
|
the-stack_106_16591
|
import os
import gzip
import json
import click
import pathlib
from pandas import read_csv, read_excel, DataFrame
from cli_util import DipException
class File:
def __init__(self, filename, ext, selected, view_name, is_cli=False):
self.view_name = view_name
self.filename = filename
self.ext = ext
self.selected = selected
self.cleaner = None
self.chunked = False
self.current_chunk = 0
self.dir_name = os.path.dirname(filename)
# Extras
try:
self.non_extension_part = os.path.split(filename)[-1].split('.')[0]
except Exception as e:
print(e.__str__())
self.non_extension_part = 'NewFile'
print('failed to extract non extension part from file name')
# ------ Pandas DataFrame ------ #
self.data: DataFrame = None
self.all_data: DataFrame = None
self.preview_data: DataFrame = None
self.transform_method_button: dict = {}
def save_data_to_file(self, output_data, destination_folder, params):
output_filename = None
sub_dir = ''
try:
sub_dir = self.dir_name[len(params.input_dir):]
pathlib.Path(os.path.join(destination_folder, sub_dir)).mkdir(parents=True, exist_ok=True)
except:
pass
try:
if params.output_format == 'json':
output_filename = os.path.join(destination_folder, sub_dir,
self.non_extension_part + '_processed.json')
params.output_filename = output_filename
self.save_json_file(output_data, params)
if params.output_format == 'csv':
output_filename = os.path.join(destination_folder, sub_dir,
self.non_extension_part + '_processed.csv')
params.output_filename = output_filename
self.save_csv_file(output_data, params)
except Exception as e:
message = f'Error while saving file to: {output_filename}. {e.__str__()}'
print(message)
raise DipException(message)
return output_filename
def save_json_file(self, output_data, params):
try:
self.write_to_json_file(output_data, params, 'utf-8')
except Exception as e:
print(e.__str__(), "\n", "Trying utf-8-sig encoding...")
self.write_to_json_file(output_data, params, 'utf-8-sig')
def save_csv_file(self, output_data, params):
try:
self.write_to_csv_file(output_data, params, 'utf-8')
except Exception as e:
print(e.__str__(), "\n", "Trying utf-8-sig encoding...")
self.write_to_csv_file(output_data, params, 'utf-8-sig')
def write_to_json_file(self, results, params, encoding):
indent = None
if params.pretty_json:
indent = 4
if params.compress:
with gzip.open(params.output_filename + '.gz', 'wt', encoding=encoding) as file:
results.to_json(file, force_ascii=False, orient='records', compression='gzip', indent=indent)
else:
if self.current_chunk == 0:
with open(params.output_filename, 'w', encoding=encoding) as file:
results.to_json(file, force_ascii=False, orient='records', indent=indent)
else:
with open(params.output_filename, 'r') as file:
jsn = json.load(file)
txt = results.to_json(force_ascii=False,
orient='records',
indent=indent)
txt = txt.encode(encoding)
new_json = json.loads(txt)
jsn = jsn + new_json
with open(params.output_filename, 'w', encoding=encoding) as file:
json.dump(jsn, file, ensure_ascii=False)
self.current_chunk += 1
def write_to_csv_file(self, results, params, encoding):
if params.compress and not self.chunked:
with gzip.open(params.output_filename + '.gz', 'wt', encoding=encoding) as file:
                results.to_csv(file, compression='gzip', index=False)
else:
if self.current_chunk == 0:
results.to_csv(params.output_filename, index=False)
else:
results.to_csv(params.output_filename, mode='a', header=False, index=False)
print(f"Saved chunk {self.current_chunk + 1} to {params.output_filename}", end='\r')
self.current_chunk += 1
|
the-stack_106_16592
|
# Copyright (c) 2018 Pablo Moreno-Munoz
# Universidad Carlos III de Madrid and University of Sheffield
import numpy as np
from GPy.likelihoods import link_functions
from GPy.likelihoods import Likelihood
from scipy.stats import multinomial
from functools import reduce
from GPy.util.misc import safe_exp, safe_square
from scipy.special import logsumexp
class Categorical(Likelihood):
"""
    Categorical likelihood over K classes.
    Needs (K-1) latent functions (see Link Functions)
"""
def __init__(self, K, gp_link=None):
if gp_link is None:
gp_link = link_functions.Identity()
super(Categorical, self).__init__(gp_link, name='Categorical')
self.K = K
def pdf(self, F, y, Y_metadata=None):
Y_oneK = self.onehot(y)
eF = safe_exp(F)
den = 1 + eF.sum(1)[:, None]
p = eF / np.tile(den, eF.shape[1])
p = np.hstack((p, 1 / den))
p = np.clip(p, 1e-9, 1 - 1e-9)
p = p / np.tile(p.sum(1)[:,None], (1, p.shape[1]))
pdf = multinomial.pmf(x=Y_oneK, n=1, p=p)
return pdf
def logpdf(self, F, y, Y_metadata=None):
Y_oneK = self.onehot(y)
eF = safe_exp(F)
den = 1 + eF.sum(1)[:, None]
p = eF / np.tile(den, eF.shape[1])
p = np.hstack((p, 1 / den))
p = np.clip(p, 1e-9, 1- 1e-9)
p = p / np.tile(p.sum(1)[:,None], (1, p.shape[1]))
logpdf = multinomial.logpmf(x=Y_oneK, n=1, p=p)
return logpdf
def logpdf_sampling(self, F, y, Y_metadata=None):
Y_oneK = self.onehot(y)
eF = safe_exp(F)
den = 1 + eF.sum(1)[:, None, :]
p = eF / np.tile(den, (1, eF.shape[1] ,1))
p = np.hstack((p, 1 / den))
p = np.clip(p, 1e-9, 1 - 1e-9)
p = p / np.tile(p.sum(1)[:,None,:], (1, p.shape[1],1))
Y_oneK_rep = np.tile(Y_oneK, (eF.shape[2],1))
p_rep = np.empty((p.shape[0]*p.shape[2],p.shape[1]))
for s in range(p.shape[2]):
p_rep[s * p.shape[0]:(s * p.shape[0]) + p.shape[0], :] = p[:, :, s]
logpdf = multinomial.logpmf(x=Y_oneK_rep, n=1, p=p_rep)
logpdf = logpdf.reshape(p.shape[0], p.shape[2])
return logpdf
def samples(self, F, num_samples,Y_metadata=None):
eF = safe_exp(F)
den = 1 + eF.sum(1)[:, None]
p = eF / np.tile(den, eF.shape[1])
p = np.hstack((p, 1 / den))
p = np.clip(p, 1e-9, 1 - 1e-9)
p = p / np.tile(p.sum(1)[:,None], (1, p.shape[1]))
samples = np.empty((F.shape[0], self.K))
for i in range(F.shape[0]):
samples[i,:] = multinomial.rvs(n=1, p=p[i,:], size=1)
return self.invonehot(Y=samples)
def onehot(self, y):
# One-Hot Encoding of Categorical Data
Y_onehot = np.zeros((y.shape[0], self.K))
for k in range(self.K):
Y_onehot[:,k,None] = (y==k+1).astype(np.int)
return Y_onehot
def invonehot(self, Y):
        # Inverse of the one-hot encoding: recover categorical labels
ycat = np.where( Y == 1)[1] + 1
return ycat[:,None]
def rho_k(self, F, k):
# Probability of class k: P(y=k)
Kminus1 = F.shape[1]
eF = safe_exp(F)
rho = eF / (1 + np.tile(eF.sum(1)[:,None], (1, F.shape[1])))
rho = np.clip(rho, 1e-9, 1. - 1e-9) # numerical stability
rho = rho / np.tile(rho.sum(1)[:,None], (1, rho.shape[1]))
        if k >= Kminus1:
rho_k = 1 - rho.sum(1)
else:
rho_k = rho[:,k]
return rho_k
def dlogp_df(self, df, F, y, Y_metadata=None):
# df: indicated the derivated function f from F
Y_oneK = self.onehot(y)
eF = safe_exp(F)
den = 1 + eF.sum(1)[:, None]
p = eF[:, df, None] / den
p = np.clip(p, 1e-9, 1. - 1e-9) # numerical stability
p = p / np.tile(p.sum(1)[:,None], (1, p.shape[1]))
yp = Y_oneK*np.tile(p, (1, Y_oneK.shape[1])) #old, new is simpler
dlogp = Y_oneK[:,df,None] - yp.sum(1)[:,None] #old, new is simpler
#dlogp = Y_oneK[:,df,None] - p
return dlogp
def d2logp_df2(self, df, F, y, Y_metadata=None):
# df: indicated the derivated function f from F
Y_oneK = self.onehot(y)
eF = safe_exp(F)
den = 1 + eF.sum(1)[:, None]
num = F + np.tile(F[:,df,None],(1,F.shape[1]))
enum = safe_exp(num)
enum[:,df] = safe_exp(F[:,df])
num = enum.sum(1)[:,None]
        p = num / safe_square(den) # TODO: add clipping for numerical stability
#p = p / np.tile(p.sum(1), (1, p.shape[1]))
yp = Y_oneK*np.tile(p, (1, Y_oneK.shape[1])) #old, new is simpler
d2logp = - yp.sum(1)[:,None] #old, new is simpler
return d2logp
def var_exp(self, y, M, V, gh_points=None, Y_metadata=None):
# Variational Expectation
# gh: Gaussian-Hermite quadrature
if gh_points is None:
gh_f, gh_w = self._gh_points(T=10)
else:
gh_f, gh_w = gh_points
D = M.shape[1]
# grid-size and fd tuples
expanded_F_tuples = []
grid_tuple = [M.shape[0]]
for d in range(D):
grid_tuple.append(gh_f.shape[0])
expanded_fd_tuple = [1]*(D+1)
expanded_fd_tuple[d+1] = gh_f.shape[0]
expanded_F_tuples.append(tuple(expanded_fd_tuple))
# mean-variance tuple
mv_tuple = [1]*(D+1)
mv_tuple[0] = M.shape[0]
mv_tuple = tuple(mv_tuple)
# building, normalizing and reshaping the grids
F = np.zeros((reduce(lambda x, y: x * y, grid_tuple),D))
for d in range(D):
fd = np.zeros(tuple(grid_tuple))
fd[:] = np.reshape(gh_f, expanded_F_tuples[d])*np.sqrt(2*np.reshape(V[:,d],mv_tuple)) \
+ np.reshape(M[:,d],mv_tuple)
F[:,d,None] = fd.reshape(reduce(lambda x, y: x * y, grid_tuple), -1, order='C')
# function evaluation
Y_full = np.repeat(y, gh_f.shape[0]**D, axis=0)
logp = self.logpdf(F, Y_full)
logp = logp.reshape(tuple(grid_tuple))
# calculating quadrature
var_exp = logp.dot(gh_w) / np.sqrt(np.pi)
for d in range(D-1):
var_exp = var_exp.dot(gh_w) / np.sqrt(np.pi)
return var_exp[:,None]
def var_exp_derivatives(self, y, M, V, gh_points=None, Y_metadata=None):
# Variational Expectation
# gh: Gaussian-Hermite quadrature
if gh_points is None:
gh_f, gh_w = self._gh_points(T=10)
else:
gh_f, gh_w = gh_points
N = M.shape[0]
D = M.shape[1]
# grid-size and fd tuples
expanded_F_tuples = []
grid_tuple = [M.shape[0]]
for d in range(D):
grid_tuple.append(gh_f.shape[0])
expanded_fd_tuple = [1] * (D + 1)
expanded_fd_tuple[d + 1] = gh_f.shape[0]
expanded_F_tuples.append(tuple(expanded_fd_tuple))
# mean-variance tuple
mv_tuple = [1] * (D + 1)
mv_tuple[0] = M.shape[0]
mv_tuple = tuple(mv_tuple)
# building, normalizing and reshaping the grids
F = np.zeros((reduce(lambda x, y: x * y, grid_tuple), D))
for d in range(D):
fd = np.zeros(tuple(grid_tuple))
fd[:] = np.reshape(gh_f, expanded_F_tuples[d]) * np.sqrt(2 * np.reshape(V[:, d], mv_tuple)) \
+ np.reshape(M[:, d], mv_tuple)
F[:, d, None] = fd.reshape(reduce(lambda x, y: x * y, grid_tuple), -1, order='C')
# function evaluation
Y_full = np.repeat(y, gh_f.shape[0] ** D, axis=0)
var_exp_dm = np.empty((N,D))
var_exp_dv = np.empty((N,D))
for d in range(D):
# wrt to the mean
dlogp = self.dlogp_df(d, F, Y_full)
dlogp = dlogp.reshape(tuple(grid_tuple))
ve_dm = dlogp.dot(gh_w) / np.sqrt(np.pi)
# wrt to the variance
d2logp = self.d2logp_df2(d, F, Y_full)
d2logp = d2logp.reshape(tuple(grid_tuple))
ve_dv = d2logp.dot(gh_w) / np.sqrt(np.pi)
for fd in range(D - 1):
ve_dm = ve_dm.dot(gh_w) / np.sqrt(np.pi)
ve_dv = ve_dv.dot(gh_w) / np.sqrt(np.pi)
var_exp_dm[:,d] = ve_dm
var_exp_dv[:,d] = 0.5 * ve_dv
return var_exp_dm, var_exp_dv
def predictive(self, M, V, gh_points=None, Y_metadata=None):
# Variational Expectation
# gh: Gaussian-Hermite quadrature
if gh_points is None:
gh_f, gh_w = self._gh_points(T=10)
else:
gh_f, gh_w = gh_points
N = M.shape[0]
D = M.shape[1]
# grid-size and fd tuples
expanded_F_tuples = []
grid_tuple = [M.shape[0]]
for d in range(D):
grid_tuple.append(gh_f.shape[0])
expanded_fd_tuple = [1] * (D + 1)
expanded_fd_tuple[d + 1] = gh_f.shape[0]
expanded_F_tuples.append(tuple(expanded_fd_tuple))
# mean-variance tuple
mv_tuple = [1] * (D + 1)
mv_tuple[0] = M.shape[0]
mv_tuple = tuple(mv_tuple)
# building, normalizing and reshaping the grids
F = np.zeros((reduce(lambda x, y: x * y, grid_tuple), D))
for d in range(D):
fd = np.zeros(tuple(grid_tuple))
fd[:] = np.reshape(gh_f, expanded_F_tuples[d]) * np.sqrt(2 * np.reshape(V[:, d], mv_tuple)) \
+ np.reshape(M[:, d], mv_tuple)
F[:, d, None] = fd.reshape(reduce(lambda x, y: x * y, grid_tuple), -1, order='C')
# function evaluation
mean_pred = np.empty((N, D))
var_pred = np.zeros((N, D))
for d in range(D):
# wrt to the mean
mean_k = self.rho_k(F, d)
mean_k = mean_k.reshape(tuple(grid_tuple))
mean_pred_k = mean_k.dot(gh_w) / np.sqrt(np.pi)
# wrt to the variance
# NOT IMPLEMENTED
for fd in range(D - 1):
mean_pred_k = mean_pred_k.dot(gh_w) / np.sqrt(np.pi)
mean_pred[:, d] = mean_pred_k
return mean_pred, var_pred
def log_predictive(self, Ytest, mu_F_star, v_F_star, num_samples):
Ntest, D = mu_F_star.shape
F_samples = np.empty((Ntest, D, num_samples))
# function samples:
for d in range(D):
mu_fd_star = mu_F_star[:, d, None]
var_fd_star = v_F_star[:, d, None]
F_samples[:, d, :] = np.random.normal(mu_fd_star, np.sqrt(var_fd_star), size=(Ntest, num_samples))
# monte-carlo:
log_pred = -np.log(num_samples) + logsumexp(self.logpdf_sampling(F_samples, Ytest), axis=-1)
log_pred = np.array(log_pred).reshape(*Ytest.shape)
log_predictive = (1/num_samples)*log_pred.sum()
return log_predictive
def get_metadata(self):
dim_y = 1
dim_f = self.K - 1
dim_p = self.K - 1
return dim_y, dim_f, dim_p
def ismulti(self):
# Returns if the distribution is multivariate
return True
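# Illustrative sketch (not part of the original module): the construction used throughout this
# class maps K-1 latent values f_1..f_{K-1} to K class probabilities via
#     p_k = exp(f_k) / (1 + sum_j exp(f_j))  for k < K,    p_K = 1 / (1 + sum_j exp(f_j)),
# so the probabilities always sum to one. A quick standalone check of that normalisation:
if __name__ == '__main__':
    F_demo = np.array([[0.3, -1.2, 2.0]])  # one input, K-1 = 3 latent functions, i.e. K = 4
    eF_demo = np.exp(F_demo)
    den_demo = 1.0 + eF_demo.sum(axis=1, keepdims=True)
    p_demo = np.hstack((eF_demo / den_demo, 1.0 / den_demo))  # shape (1, K)
    assert np.allclose(p_demo.sum(axis=1), 1.0)
    print(p_demo)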
|
the-stack_106_16593
|
# --------------------------------------------------------
# DenseCap-Tensorflow
# Written by InnerPeace
# This file is adapted from Linjie's work
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import sys
# sys.path.append("..")
import os
import os.path as osp
# import PIL
from PIL import Image
import six
from six.moves import xrange
from lib.utils.cython_bbox import bbox_overlaps
import numpy as np
import scipy.sparse
from lib.config import cfg
# TODO: delete irrelevant code
class imdb(object):
"""Image database."""
def __init__(self, name):
self._name = name
self._num_classes = 0
self._classes = []
self._image_index = []
self._obj_proposer = 'selective_search'
self._roidb = None
self._roidb_handler = self.default_roidb
# Use this dict for storing dataset specific config options
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
def set_proposal_method(self, method):
method = eval('self.' + method + '_roidb')
self.roidb_handler = method
@property
def roidb(self):
# A roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
return self._roidb
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def image_path_at(self, i):
raise NotImplementedError
def default_roidb(self):
raise NotImplementedError
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
        or a numpy array of detections.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def _get_widths(self):
return [Image.open(self.image_path_at(i)).size[0]
for i in xrange(self.num_images)]
def append_flipped_images(self):
num_images = self.num_images
widths = self._get_widths()
for i in xrange(num_images):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = widths[i] - oldx2 - 1
boxes[:, 2] = widths[i] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'boxes': boxes,
'gt_overlaps': self.roidb[i]['gt_overlaps'],
'gt_classes': self.roidb[i]['gt_classes'],
'flipped': True,
'gt_phrases': self.roidb[i]['gt_phrases']}
self.roidb.append(entry)
self._image_index = self._image_index * 2
def evaluate_recall(self, candidate_boxes=None, thresholds=None,
area='all', limit=None):
"""Evaluate detection proposal recall metrics.
Returns:
results: dictionary of results with keys
'ar': average recall
'recalls': vector recalls at each IoU overlap threshold
'thresholds': vector of IoU overlap thresholds
'gt_overlaps': vector of all ground-truth overlaps
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {'all': 0, 'small': 1, 'medium': 2, 'large': 3,
'96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}
area_ranges = [[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2], # 512-inf
]
        assert area in areas, 'unknown area range: {}'.format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = np.zeros(0)
num_pos = 0
for i in xrange(self.num_images):
# Checking for max_overlaps == 1 avoids including crowd annotations
            # (...pretty hacky :/)
max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)
gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &
(max_gt_overlaps == 1))[0]
gt_boxes = self.roidb[i]['boxes'][gt_inds, :]
gt_areas = self.roidb[i]['seg_areas'][gt_inds]
valid_gt_inds = np.where((gt_areas >= area_range[0]) &
(gt_areas <= area_range[1]))[0]
gt_boxes = gt_boxes[valid_gt_inds, :]
num_pos += len(valid_gt_inds)
if candidate_boxes is None:
# If candidate_boxes is not supplied, the default is to use the
# non-ground-truth boxes from this roidb
non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]
boxes = self.roidb[i]['boxes'][non_gt_inds, :]
else:
boxes = candidate_boxes[i]
if boxes.shape[0] == 0:
continue
if limit is not None and boxes.shape[0] > limit:
boxes = boxes[:limit, :]
            overlaps = bbox_overlaps(boxes.astype(np.float64),
                                     gt_boxes.astype(np.float64))
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
for j in xrange(gt_boxes.shape[0]):
# find which proposal box maximally covers each gt box
argmax_overlaps = overlaps.argmax(axis=0)
# and get the iou amount of coverage for each gt box
max_overlaps = overlaps.max(axis=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ind = max_overlaps.argmax()
gt_ovr = max_overlaps.max()
assert (gt_ovr >= 0)
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert (_gt_overlaps[j] == gt_ovr)
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
gt_overlaps = np.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = np.arange(0.5, 0.95 + 1e-5, step)
recalls = np.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,
'gt_overlaps': gt_overlaps}
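    # Added note (not original code), illustrative use of the dict above:
    #   res = dataset.evaluate_recall(limit=1000)
    #   print(res['ar'], dict(zip(res['thresholds'], res['recalls'])))
    # where `dataset` is a hypothetical concrete imdb subclass with a roidb.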
def create_roidb_from_box_list(self, box_list, gt_roidb):
assert len(box_list) == self.num_images, \
'Number of boxes must match number of ground-truth images'
roidb = []
for i in xrange(self.num_images):
boxes = box_list[i]
num_boxes = boxes.shape[0]
overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)
if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:
gt_boxes = gt_roidb[i]['boxes']
gt_classes = gt_roidb[i]['gt_classes']
                gt_overlaps = bbox_overlaps(boxes.astype(np.float64),
                                            gt_boxes.astype(np.float64))
argmaxes = gt_overlaps.argmax(axis=1)
maxes = gt_overlaps.max(axis=1)
I = np.where(maxes > 0)[0]
overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
overlaps = scipy.sparse.csr_matrix(overlaps)
roidb.append({
'boxes': boxes,
'gt_classes': np.zeros((num_boxes,), dtype=np.int32),
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': np.zeros((num_boxes,), dtype=np.float32),
})
return roidb
@staticmethod
def merge_roidbs(a, b):
assert len(a) == len(b)
for i in xrange(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],
b[i]['gt_classes']))
a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
b[i]['gt_overlaps']])
a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],
b[i]['seg_areas']))
return a
def competition_mode(self, on):
"""Turn competition mode on or off."""
pass
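# Illustrative sketch (not part of the original dataset code): a minimal
# subclass showing how the abstract hooks above are meant to be filled in.
# The dataset name, the image layout under DATA_DIR/toy/ and the two-image
# index are hypothetical examples.
class _toy_imdb(imdb):
    """Tiny example dataset wiring image_path_at/default_roidb to fixed data."""
    def __init__(self):
        imdb.__init__(self, 'toy')
        self._classes = ['__background__', 'widget']
        self._image_index = ['000001', '000002']
    def image_path_at(self, i):
        # images are assumed to live under DATA_DIR/toy/<index>.jpg
        return osp.join(cfg.DATA_DIR, 'toy', self._image_index[i] + '.jpg')
    def default_roidb(self):
        # one empty ground-truth entry per image, in the roidb dict format
        # documented in the roidb property above
        return [{'boxes': np.zeros((0, 4), dtype=np.uint16),
                 'gt_classes': np.zeros((0,), dtype=np.int32),
                 'gt_overlaps': scipy.sparse.csr_matrix(
                     np.zeros((0, self.num_classes), dtype=np.float32)),
                 'flipped': False,
                 'seg_areas': np.zeros((0,), dtype=np.float32)}
                for _ in self._image_index]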
|
the-stack_106_16594
|
PREAMBLE = [1, 12, 0, 20, 8, 16]
def get_nth_word_spoken(preamble: list[int], target: int) -> int:
    """Play the memory game: each turn, say how long ago the previous number
    was last spoken (0 if it is new); return the target-th number spoken."""
    # remember the turn on which each number (except the most recent) was spoken
    lookup = {val: pos for pos, val in enumerate(preamble[:-1])}
    last_number = preamble[-1]
    # `turn` is the 0-based position of last_number in the spoken sequence
    for turn in range(len(preamble) - 1, target - 1):
        last_seen = lookup.get(last_number, turn)  # unseen -> gap of 0
        lookup[last_number] = turn
        last_number = turn - last_seen
    return last_number
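# Added illustration (not in the original): with the classic example starting
# numbers 0, 3, 6, the 2020th number spoken is 436, which this implementation
# reproduces; kept as a cheap self-check.
assert get_nth_word_spoken([0, 3, 6], 2020) == 436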
if __name__ == "__main__":
print(get_nth_word_spoken(PREAMBLE, 2020))
print(get_nth_word_spoken(PREAMBLE, 30000000))
|
the-stack_106_16595
|
from django.core.exceptions import ImproperlyConfigured
class BaseIntegration:
required_credentials = []
def __init__(self, **credentials):
for key, value in credentials.items():
setattr(self, key, value)
for credential in self.required_credentials:
if not hasattr(self, credential):
raise ImproperlyConfigured(
'`{}` must be set to use {} integration'.format(credential, self.__class__.__name__)
)
def subscribe(self, subscriber):
raise NotImplementedError
def unsubscribe(self, subscriber, delete=False):
raise NotImplementedError
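# Illustrative sketch (not part of the original module): a concrete integration
# built on BaseIntegration. The class name, credential keys and the
# subscribe/unsubscribe bodies are hypothetical -- a real backend would call
# its provider's API here.
class ExampleNewsletterIntegration(BaseIntegration):
    required_credentials = ['api_key', 'list_id']
    def subscribe(self, subscriber):
        # e.g. POST the subscriber to the provider's list endpoint
        return {'subscriber': subscriber, 'list_id': self.list_id, 'status': 'subscribed'}
    def unsubscribe(self, subscriber, delete=False):
        # e.g. archive (or delete, if requested) the member on the provider
        return {'subscriber': subscriber, 'deleted': delete}
# Usage: ExampleNewsletterIntegration(api_key='...', list_id='...') raises
# ImproperlyConfigured if either credential is missing.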
|
the-stack_106_16596
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
from imp import load_source
from multiprocessing import cpu_count
from os.path import basename, dirname, isdir, isfile, join
from urllib import quote
import click
import semantic_version
from platformio import __version__, app, exception, util
from platformio.managers.core import get_core_package_dir
from platformio.managers.package import BasePkgManager, PackageManager
class PlatformManager(BasePkgManager):
FILE_CACHE_VALID = None # disable platform download caching
def __init__(self, package_dir=None, repositories=None):
if not repositories:
repositories = [
"https://dl.bintray.com/platformio/dl-platforms/manifest.json",
"{0}://dl.platformio.org/platforms/manifest.json".format(
"https" if app.get_setting("enable_ssl") else "http")
]
BasePkgManager.__init__(
self, package_dir or join(util.get_home_dir(), "platforms"),
repositories)
@property
def manifest_names(self):
return ["platform.json"]
def get_manifest_path(self, pkg_dir):
if not isdir(pkg_dir):
return None
for name in self.manifest_names:
manifest_path = join(pkg_dir, name)
if isfile(manifest_path):
return manifest_path
return None
def install(self,
name,
requirements=None,
with_packages=None,
without_packages=None,
skip_default_package=False,
after_update=False,
silent=False,
force=False,
**_): # pylint: disable=too-many-arguments, arguments-differ
platform_dir = BasePkgManager.install(
self, name, requirements, silent=silent, force=force)
p = PlatformFactory.newPlatform(platform_dir)
# don't cleanup packages or install them after update
# we check packages for updates in def update()
if after_update:
return True
p.install_packages(
with_packages,
without_packages,
skip_default_package,
silent=silent,
force=force)
return self.cleanup_packages(p.packages.keys())
def uninstall(self, package, requirements=None, after_update=False):
if isdir(package):
pkg_dir = package
else:
name, requirements, url = self.parse_pkg_uri(package, requirements)
pkg_dir = self.get_package_dir(name, requirements, url)
if not pkg_dir:
raise exception.UnknownPlatform(package)
p = PlatformFactory.newPlatform(pkg_dir)
BasePkgManager.uninstall(self, pkg_dir, requirements)
# don't cleanup packages or install them after update
# we check packages for updates in def update()
if after_update:
return True
return self.cleanup_packages(p.packages.keys())
def update( # pylint: disable=arguments-differ
self,
package,
requirements=None,
only_check=False,
only_packages=False):
if isdir(package):
pkg_dir = package
else:
name, requirements, url = self.parse_pkg_uri(package, requirements)
pkg_dir = self.get_package_dir(name, requirements, url)
if not pkg_dir:
raise exception.UnknownPlatform(package)
p = PlatformFactory.newPlatform(pkg_dir)
pkgs_before = p.get_installed_packages().keys()
missed_pkgs = set()
if not only_packages:
BasePkgManager.update(self, pkg_dir, requirements, only_check)
p = PlatformFactory.newPlatform(pkg_dir)
missed_pkgs = set(pkgs_before) & set(p.packages.keys())
missed_pkgs -= set(p.get_installed_packages().keys())
p.update_packages(only_check)
self.cleanup_packages(p.packages.keys())
if missed_pkgs:
p.install_packages(
with_packages=list(missed_pkgs), skip_default_package=True)
return True
def cleanup_packages(self, names):
self.cache_reset()
deppkgs = {}
for manifest in PlatformManager().get_installed():
p = PlatformFactory.newPlatform(manifest['__pkg_dir'])
for pkgname, pkgmanifest in p.get_installed_packages().items():
if pkgname not in deppkgs:
deppkgs[pkgname] = set()
deppkgs[pkgname].add(pkgmanifest['version'])
pm = PackageManager(join(util.get_home_dir(), "packages"))
for manifest in pm.get_installed():
if manifest['name'] not in names:
continue
if (manifest['name'] not in deppkgs
or manifest['version'] not in deppkgs[manifest['name']]):
try:
pm.uninstall(manifest['__pkg_dir'], after_update=True)
except exception.UnknownPackage:
pass
self.cache_reset()
return True
@util.memoized(expire=5000)
def get_installed_boards(self):
boards = []
for manifest in self.get_installed():
p = PlatformFactory.newPlatform(manifest['__pkg_dir'])
for config in p.get_boards().values():
board = config.get_brief_data()
if board not in boards:
boards.append(board)
return boards
@staticmethod
@util.memoized()
def get_registered_boards():
return util.get_api_result("/boards", cache_valid="7d")
def get_all_boards(self):
boards = self.get_installed_boards()
know_boards = ["%s:%s" % (b['platform'], b['id']) for b in boards]
try:
for board in self.get_registered_boards():
key = "%s:%s" % (board['platform'], board['id'])
if key not in know_boards:
boards.append(board)
except (exception.APIRequestError, exception.InternetIsOffline):
pass
return sorted(boards, key=lambda b: b['name'])
def board_config(self, id_, platform=None):
for manifest in self.get_installed_boards():
if manifest['id'] == id_ and (not platform
or manifest['platform'] == platform):
return manifest
for manifest in self.get_registered_boards():
if manifest['id'] == id_ and (not platform
or manifest['platform'] == platform):
return manifest
raise exception.UnknownBoard(id_)
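# Illustrative usage sketch (added note, not part of the original module);
# "atmelavr" is just an example platform name:
#
#   pm = PlatformManager()
#   pm.install("atmelavr")                 # fetch the platform and its default packages
#   for manifest in pm.get_installed():
#       print(manifest['name'], manifest.get('version'))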
class PlatformFactory(object):
@staticmethod
def get_clsname(name):
name = re.sub(r"[^\da-z\_]+", "", name, flags=re.I)
return "%s%sPlatform" % (name.upper()[0], name.lower()[1:])
@staticmethod
def load_module(name, path):
module = None
try:
module = load_source("platformio.managers.platform.%s" % name,
path)
except ImportError:
raise exception.UnknownPlatform(name)
return module
@classmethod
def newPlatform(cls, name, requirements=None):
pm = PlatformManager()
platform_dir = None
if isdir(name):
platform_dir = name
name = pm.load_manifest(platform_dir)['name']
elif name.endswith("platform.json") and isfile(name):
platform_dir = dirname(name)
name = util.load_json(name)['name']
else:
name, requirements, url = pm.parse_pkg_uri(name, requirements)
platform_dir = pm.get_package_dir(name, requirements, url)
if platform_dir:
name = pm.load_manifest(platform_dir)['name']
if not platform_dir:
raise exception.UnknownPlatform(
name if not requirements else "%s@%s" % (name, requirements))
platform_cls = None
if isfile(join(platform_dir, "platform.py")):
platform_cls = getattr(
cls.load_module(name, join(platform_dir, "platform.py")),
cls.get_clsname(name))
else:
platform_cls = type(
str(cls.get_clsname(name)), (PlatformBase, ), {})
_instance = platform_cls(join(platform_dir, "platform.json"))
assert isinstance(_instance, PlatformBase)
return _instance
class PlatformPackagesMixin(object):
def install_packages( # pylint: disable=too-many-arguments
self,
with_packages=None,
without_packages=None,
skip_default_package=False,
silent=False,
force=False):
with_packages = set(self.find_pkg_names(with_packages or []))
without_packages = set(self.find_pkg_names(without_packages or []))
upkgs = with_packages | without_packages
ppkgs = set(self.packages.keys())
if not upkgs.issubset(ppkgs):
raise exception.UnknownPackage(", ".join(upkgs - ppkgs))
for name, opts in self.packages.items():
version = opts.get("version", "")
if name in without_packages:
continue
elif (name in with_packages or
not (skip_default_package or opts.get("optional", False))):
if ":" in version:
self.pm.install(
"%s=%s" % (name, version), silent=silent, force=force)
else:
self.pm.install(name, version, silent=silent, force=force)
return True
def find_pkg_names(self, candidates):
result = []
for candidate in candidates:
found = False
# lookup by package types
for _name, _opts in self.packages.items():
if _opts.get("type") == candidate:
result.append(_name)
found = True
if (self.frameworks and candidate.startswith("framework-")
and candidate[10:] in self.frameworks):
result.append(self.frameworks[candidate[10:]]['package'])
found = True
if not found:
result.append(candidate)
return result
def update_packages(self, only_check=False):
for name, manifest in self.get_installed_packages().items():
requirements = self.packages[name].get("version", "")
if ":" in requirements:
_, requirements, __ = self.pm.parse_pkg_uri(requirements)
self.pm.update(manifest['__pkg_dir'], requirements, only_check)
def get_installed_packages(self):
items = {}
for name in self.packages:
pkg_dir = self.get_package_dir(name)
if pkg_dir:
items[name] = self.pm.load_manifest(pkg_dir)
return items
def are_outdated_packages(self):
for name, manifest in self.get_installed_packages().items():
requirements = self.packages[name].get("version", "")
if ":" in requirements:
_, requirements, __ = self.pm.parse_pkg_uri(requirements)
if self.pm.outdated(manifest['__pkg_dir'], requirements):
return True
return False
def get_package_dir(self, name):
version = self.packages[name].get("version", "")
if ":" in version:
return self.pm.get_package_dir(
*self.pm.parse_pkg_uri("%s=%s" % (name, version)))
return self.pm.get_package_dir(name, version)
def get_package_version(self, name):
pkg_dir = self.get_package_dir(name)
if not pkg_dir:
return None
return self.pm.load_manifest(pkg_dir).get("version")
class PlatformRunMixin(object):
LINE_ERROR_RE = re.compile(r"(^|\s+)error:?\s+", re.I)
def run(self, variables, targets, silent, verbose):
assert isinstance(variables, dict)
assert isinstance(targets, list)
self.configure_default_packages(variables, targets)
self.install_packages(silent=True)
self.silent = silent
self.verbose = verbose or app.get_setting("force_verbose")
if "clean" in targets:
targets = ["-c", "."]
variables['platform_manifest'] = self.manifest_path
if "build_script" not in variables:
variables['build_script'] = self.get_build_script()
if not isfile(variables['build_script']):
raise exception.BuildScriptNotFound(variables['build_script'])
result = self._run_scons(variables, targets)
assert "returncode" in result
return result
def _run_scons(self, variables, targets):
cmd = [
util.get_pythonexe_path(),
join(get_core_package_dir("tool-scons"), "script", "scons"), "-Q",
"-j %d" % self.get_job_nums(), "--warn=no-no-parallel-support",
"-f",
join(util.get_source_dir(), "builder", "main.py")
]
cmd.append("PIOVERBOSE=%d" % (1 if self.verbose else 0))
cmd += targets
# encode and append variables
for key, value in variables.items():
cmd.append("%s=%s" % (key.upper(), base64.b64encode(value)))
util.copy_pythonpath_to_osenv()
result = util.exec_command(
cmd,
stdout=util.AsyncPipe(self.on_run_out),
stderr=util.AsyncPipe(self.on_run_err))
return result
def on_run_out(self, line):
if "`buildprog' is up to date." in line:
return
self._echo_line(line, level=1)
def on_run_err(self, line):
is_error = self.LINE_ERROR_RE.search(line) is not None
self._echo_line(line, level=3 if is_error else 2)
a_pos = line.find("fatal error:")
b_pos = line.rfind(": No such file or directory")
if a_pos == -1 or b_pos == -1:
return
self._echo_missed_dependency(line[a_pos + 12:b_pos].strip())
def _echo_line(self, line, level):
if line.startswith("scons: "):
line = line[7:]
assert 1 <= level <= 3
if self.silent and (level < 2 or not line):
return
fg = (None, "yellow", "red")[level - 1]
if level == 1 and "is up to date" in line:
fg = "green"
click.secho(line, fg=fg, err=level > 1)
@staticmethod
def _echo_missed_dependency(filename):
if "/" in filename or not filename.endswith((".h", ".hpp")):
return
banner = """
{dots}
* Looking for {filename_styled} dependency? Check our library registry!
*
* CLI > platformio lib search "header:{filename}"
* Web > {link}
*
{dots}
""".format(filename=filename,
filename_styled=click.style(filename, fg="cyan"),
link=click.style(
"https://platformio.org/lib/search?query=header:%s" % quote(
filename, safe=""),
fg="blue"),
dots="*" * (56 + len(filename)))
click.echo(banner, err=True)
@staticmethod
def get_job_nums():
try:
return cpu_count()
except NotImplementedError:
return 1
class PlatformBase( # pylint: disable=too-many-public-methods
PlatformPackagesMixin, PlatformRunMixin):
PIO_VERSION = semantic_version.Version(util.pepver_to_semver(__version__))
_BOARDS_CACHE = {}
def __init__(self, manifest_path):
self._BOARDS_CACHE = {}
self.manifest_path = manifest_path
self._manifest = util.load_json(manifest_path)
self.pm = PackageManager(
join(util.get_home_dir(), "packages"),
self._manifest.get("packageRepositories"))
self.silent = False
self.verbose = False
if self.engines and "platformio" in self.engines:
if self.PIO_VERSION not in semantic_version.Spec(
self.engines['platformio']):
raise exception.IncompatiblePlatform(self.name,
str(self.PIO_VERSION))
@property
def name(self):
return self._manifest['name']
@property
def title(self):
return self._manifest['title']
@property
def description(self):
return self._manifest['description']
@property
def version(self):
return self._manifest['version']
@property
def homepage(self):
return self._manifest.get("homepage")
@property
def vendor_url(self):
return self._manifest.get("url")
@property
def docs_url(self):
return self._manifest.get("docs")
@property
def repository_url(self):
return self._manifest.get("repository", {}).get("url")
@property
def license(self):
return self._manifest.get("license")
@property
def frameworks(self):
return self._manifest.get("frameworks")
@property
def engines(self):
return self._manifest.get("engines")
@property
def manifest(self):
return self._manifest
@property
def packages(self):
if "packages" not in self._manifest:
self._manifest['packages'] = {}
return self._manifest['packages']
def get_dir(self):
return dirname(self.manifest_path)
def get_build_script(self):
main_script = join(self.get_dir(), "builder", "main.py")
if isfile(main_script):
return main_script
raise NotImplementedError()
def is_embedded(self):
for opts in self.packages.values():
if opts.get("type") == "uploader":
return True
return False
def get_boards(self, id_=None):
def _append_board(board_id, manifest_path):
config = PlatformBoardConfig(manifest_path)
if "platform" in config and config.get("platform") != self.name:
return
elif "platforms" in config \
and self.name not in config.get("platforms"):
return
config.manifest['platform'] = self.name
self._BOARDS_CACHE[board_id] = config
bdirs = [
util.get_projectboards_dir(),
join(util.get_home_dir(), "boards"),
join(self.get_dir(), "boards"),
]
if id_ is None:
for boards_dir in bdirs:
if not isdir(boards_dir):
continue
for item in sorted(os.listdir(boards_dir)):
_id = item[:-5]
if not item.endswith(".json") or _id in self._BOARDS_CACHE:
continue
_append_board(_id, join(boards_dir, item))
else:
if id_ not in self._BOARDS_CACHE:
for boards_dir in bdirs:
if not isdir(boards_dir):
continue
manifest_path = join(boards_dir, "%s.json" % id_)
if isfile(manifest_path):
_append_board(id_, manifest_path)
break
if id_ not in self._BOARDS_CACHE:
raise exception.UnknownBoard(id_)
return self._BOARDS_CACHE[id_] if id_ else self._BOARDS_CACHE
def board_config(self, id_):
return self.get_boards(id_)
def get_package_type(self, name):
return self.packages[name].get("type")
def configure_default_packages(self, variables, targets):
# enable used frameworks
frameworks = variables.get("pioframework", [])
if not isinstance(frameworks, list):
frameworks = frameworks.split(", ")
for framework in frameworks:
if not self.frameworks:
continue
framework = framework.lower().strip()
if not framework or framework not in self.frameworks:
continue
_pkg_name = self.frameworks[framework].get("package")
if _pkg_name:
self.packages[_pkg_name]['optional'] = False
# enable upload tools for upload targets
if any(["upload" in t for t in targets] + ["program" in targets]):
            for name, opts in self.packages.items():
if opts.get("type") == "uploader":
self.packages[name]['optional'] = False
# skip all packages in "nobuild" mode
# allow only upload tools and frameworks
elif "nobuild" in targets and opts.get("type") != "framework":
self.packages[name]['optional'] = True
def get_lib_storages(self):
storages = []
for opts in (self.frameworks or {}).values():
if "package" not in opts:
continue
pkg_dir = self.get_package_dir(opts['package'])
if not pkg_dir or not isdir(join(pkg_dir, "libraries")):
continue
libs_dir = join(pkg_dir, "libraries")
storages.append({"name": opts['package'], "path": libs_dir})
libcores_dir = join(libs_dir, "__cores__")
if not isdir(libcores_dir):
continue
for item in os.listdir(libcores_dir):
libcore_dir = join(libcores_dir, item)
if not isdir(libcore_dir):
continue
storages.append({
"name":
"%s-core-%s" % (opts['package'], item),
"path":
libcore_dir
})
return storages
class PlatformBoardConfig(object):
def __init__(self, manifest_path):
self._id = basename(manifest_path)[:-5]
assert isfile(manifest_path)
self.manifest_path = manifest_path
try:
self._manifest = util.load_json(manifest_path)
except ValueError:
raise exception.InvalidBoardManifest(manifest_path)
if not set(["name", "url", "vendor"]) <= set(self._manifest.keys()):
raise exception.PlatformioException(
"Please specify name, url and vendor fields for " +
manifest_path)
def get(self, path, default=None):
try:
value = self._manifest
for k in path.split("."):
value = value[k]
return value
except KeyError:
if default is not None:
return default
else:
raise KeyError("Invalid board option '%s'" % path)
def update(self, path, value):
newdict = None
for key in path.split(".")[::-1]:
if newdict is None:
newdict = {key: value}
else:
newdict = {key: newdict}
util.merge_dicts(self._manifest, newdict)
def __contains__(self, key):
try:
self.get(key)
return True
except KeyError:
return False
@property
def id(self):
return self._id
@property
def id_(self):
return self.id
@property
def manifest(self):
return self._manifest
def get_brief_data(self):
return {
"id":
self.id,
"name":
self._manifest['name'],
"platform":
self._manifest.get("platform"),
"mcu":
self._manifest.get("build", {}).get("mcu", "").upper(),
"fcpu":
int("".join([
c for c in str(
self._manifest.get("build", {}).get("f_cpu", "0L"))
if c.isdigit()
])),
"ram":
self._manifest.get("upload", {}).get("maximum_ram_size", 0),
"rom":
self._manifest.get("upload", {}).get("maximum_size", 0),
"connectivity":
self._manifest.get("connectivity"),
"frameworks":
self._manifest.get("frameworks"),
"debug":
self.get_debug_data(),
"vendor":
self._manifest['vendor'],
"url":
self._manifest['url']
}
def get_debug_data(self):
if not self._manifest.get("debug", {}).get("tools"):
return None
tools = {}
for name, options in self._manifest['debug']['tools'].items():
tools[name] = {}
for key, value in options.items():
if key in ("default", "onboard"):
tools[name][key] = value
return {"tools": tools}
def get_debug_tool_name(self, custom=None):
debug_tools = self._manifest.get("debug", {}).get("tools")
tool_name = custom
if tool_name == "custom":
return tool_name
if not debug_tools:
raise exception.DebugSupportError(self._manifest['name'])
if tool_name:
if tool_name in debug_tools:
return tool_name
raise exception.DebugInvalidOptions(
"Unknown debug tool `%s`. Please use one of `%s` or `custom`" %
(tool_name, ", ".join(sorted(debug_tools.keys()))))
# automatically select best tool
data = {"default": [], "onboard": [], "external": []}
for key, value in debug_tools.items():
if value.get("default"):
data['default'].append(key)
elif value.get("onboard"):
data['onboard'].append(key)
data['external'].append(key)
for key, value in data.items():
if not value:
continue
return sorted(value)[0]
assert any(item for item in data)
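# Illustrative usage sketch (added note, not part of the original module);
# the manifest path and option names are hypothetical:
#
#   config = PlatformBoardConfig("/path/to/boards/uno.json")
#   config.get("build.mcu")               # nested lookup via a dotted path
#   config.get("upload.speed", 115200)    # missing keys fall back to the default
#   config.update("build.f_cpu", "16000000L")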
|
the-stack_106_16598
|
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "df59bf832ffdaf7c2179b6f9d80e23aaa57abc03"
TFRT_SHA256 = "40376db39e47a8b61235c40278974ac159543c78e86c6a788d9a3e9fe11f0f6c"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
|
the-stack_106_16602
|
from collections import defaultdict
import os
import argparse
import decimal
from ast import literal_eval
import sys
import json
import subprocess
import copy
import re as re_module
import tables
import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse
from sympy import re, im, Float, exp, diff
from pyne import nucname
from .tests.test_transmute import run_transmute_test
from .origen_all import TIME_STEPS, DAY
from .origen import array_mismatch, initial_vector
from .util import (plt_show_in_terminal, load_sparse_csr, diff_strs,)
from .cram import get_CRAM_from_cache, CRAM_coeffs, nsolve_intervals
from .partialfrac import (thetas_alphas, thetas_alphas_to_expr_complex,
customre)
from .gensolve import make_ijk
from .codegen import CRAM_matrix_exp_lambdify
def setup_matplotlib_rc():
from matplotlib import rcParams
rcParams['pgf.texsystem'] = 'lualatex'
rcParams["text.usetex"] = True
rcParams["font.family"] = 'serif'
rcParams["font.serif"] = "Computer Modern Roman"
rcParams["font.sans-serif"] = "Computer Modern Sans serif"
rcParams["font.monospace"] = "Computer Modern Typewriter"
def analyze_origen(origen_results, *, file=None, title=True):
plt.clf()
fig, ax = plt.subplots()
times = {
'ORIGEN': defaultdict(list),
'CRAM lambdify UMFPACK': defaultdict(list),
'CRAM lambdify SuperLU': defaultdict(list),
'CRAM py_solve': defaultdict(list),
}
label = {
'ORIGEN': 'ORIGEN',
'CRAM lambdify UMFPACK': 'CRAM SciPy solver (UMFPACK)',
'CRAM lambdify SuperLU': 'CRAM SciPy solver (SuperLU)',
'CRAM py_solve': 'CRAM C generated solver',
}
formats = {
'ORIGEN': '+',
'CRAM lambdify UMFPACK': 'x',
'CRAM lambdify SuperLU': '<',
'CRAM py_solve': '.',
}
offsets = {
'ORIGEN': 0,
'CRAM lambdify UMFPACK': -0.25,
'CRAM lambdify SuperLU': 0.25,
'CRAM py_solve': 0,
}
with tables.open_file(origen_results, mode='r') as h5file:
for run in 'ORIGEN', 'CRAM lambdify UMFPACK', 'CRAM lambdify SuperLU', 'CRAM py_solve':
for lib in h5file.root:
table = h5file.get_node(lib, run.lower().replace(' ', '-'))
for row in table:
exec_time = 'execution time CRAM lambdify' if run.startswith("CRAM lambdify") else 'execution time ' + run
times[run][row['time']].append(row[exec_time])
xvals = sorted(TIME_STEPS)
x = []
y = []
for i, t in enumerate(xvals):
itimes = times[run][sorted(times[run])[i]]
x += [10**offsets[run]*t]*len(itimes)
y += itimes
print("Longest", run, "runtime", max(y), "seconds")
print("Shortest", run, "runtime", min(y), "seconds")
ax.plot(x, y, formats[run], label=label[run])
# Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.15)
if title:
plt.title("""Runtimes for different solvers computing transmutation over
several starting libraries, nuclides, and timesteps.""")
ax.set_xscale('log')
ax.set_xticks(sorted(TIME_STEPS))
ax.xaxis.set_ticklabels([TIME_STEPS[i].replace(' ', '\n') for i in
sorted(TIME_STEPS)], size='small')
ax.set_yscale('log')
ax.legend()
plt.ylabel('Runtime (seconds)')
plt.xlabel('Time step t')
if file:
plt.savefig(file)
plt_show_in_terminal()
with tables.open_file(origen_results, mode='r') as h5file:
# First, check if any of the CRAM methods disagree
cram_rtol = 0
umfpack_pysolve_atol = 1e-13
superlu_pysolve_atol = umfpack_superlu_atol = 1e-9
print("Checking for mismatching CRAM values")
print("Using rtol=%s" % cram_rtol)
print("Using atol=%s for UMFPACK vs. py_solve" % umfpack_pysolve_atol)
print("Using atol=%s for SuperLU vs. py_solve" % superlu_pysolve_atol)
print("Using atol=%s for UMFPACK vs. SuperLU" % umfpack_superlu_atol)
mismatching = False
for lib in h5file.root:
nucs = h5file.get_node(lib, 'nucs')
runs = len(h5file.get_node(lib, 'origen'))
for i in range(runs):
CRAM_lambdify_umfpack_res = h5file.get_node(lib, 'cram-lambdify-umfpack')[i]['CRAM lambdify atom fraction']
CRAM_lambdify_superlu_res = h5file.get_node(lib, 'cram-lambdify-superlu')[i]['CRAM lambdify atom fraction']
CRAM_py_solve_res = h5file.get_node(lib, 'cram-py_solve')[i]['CRAM py_solve atom fraction']
time_step = TIME_STEPS[h5file.get_node(lib, 'cram-lambdify-umfpack')[i]['time']]
initial_nuc = nucs[np.where(h5file.get_node(lib,
'cram-lambdify-umfpack')[i]['initial vector'])[0]][0].decode()
d = {
'CRAM lambdify UMFPACK': CRAM_lambdify_umfpack_res,
'CRAM lambdify SuperLU': CRAM_lambdify_superlu_res,
'CRAM py_solve': CRAM_py_solve_res,
}
for a_desc, b_desc, atol in (
['CRAM lambdify UMFPACK', 'CRAM lambdify SuperLU', umfpack_superlu_atol],
['CRAM lambdify UMFPACK', 'CRAM py_solve', umfpack_pysolve_atol],
['CRAM lambdify SuperLU', 'CRAM py_solve', superlu_pysolve_atol],
):
a, b = d[a_desc], d[b_desc]
mismatching_indices = array_mismatch(a, b, rtol=cram_rtol, atol=atol)
if mismatching_indices:
mismatching = True
print()
print("%s and %s mismatch with library %s at time %s with starting nuclide %s"
% (a_desc, b_desc, lib._v_name, time_step, initial_nuc))
print("Mismatching elements sorted by error (%s, %s, symmetric relative error, absolute error):" % (a_desc, b_desc))
rel_error = abs(a - b)/(a + b)
abs_error = abs(a - b)
for i in mismatching_indices:
print("%s %s %s %s %s" % (nucs[i].decode(), a[i], b[i],
rel_error[i], abs_error[i]))
if not mismatching:
print("No mismatching values found!")
for time in [i for i in sorted(TIME_STEPS) if i < 100*DAY]:
time_step = TIME_STEPS[time]
print()
print("Checking mismatching ORIGEN values for time", time_step)
origen_val_missing = []
He4_atom_fraction = []
mismatching = False
for lib in h5file.root:
nucs = h5file.get_node(lib, 'nucs')
runs = len(h5file.get_node(lib, 'origen'))
for i in range(runs):
row_time = h5file.get_node(lib,
'cram-lambdify-umfpack')[i]['time']
if row_time != time:
# TODO: Do this more efficiently
continue
CRAM_py_solve_res = h5file.get_node(lib, 'cram-py_solve')[i]['CRAM py_solve atom fraction']
CRAM_py_solve_res_normalized = CRAM_py_solve_res/np.sum(CRAM_py_solve_res)
ORIGEN_res_weighted = h5file.get_node(lib, 'origen')[i]['ORIGEN atom fraction']
ORIGEN_res_materials = h5file.get_node(lib, 'origen')[i]['ORIGEN mass fraction']
initial_nuc = nucs[np.where(h5file.get_node(lib,
'cram-lambdify-umfpack')[i]['initial vector'])[0]][0].decode()
d = {
'ORIGEN atom fractions': ORIGEN_res_weighted,
'ORIGEN mass fractions': ORIGEN_res_materials,
'CRAM py_solve': CRAM_py_solve_res,
'CRAM py_solve normalized': CRAM_py_solve_res_normalized,
}
for a_desc, b_desc in (
['CRAM py_solve', 'ORIGEN atom fractions'],
['CRAM py_solve normalized', 'ORIGEN mass fractions'],
):
a, b = d[a_desc], d[b_desc]
mismatching_indices = array_mismatch(a, b)
if mismatching_indices:
mismatching = True
print()
print("%s and %s mismatch with library %s at time %s with starting nuclide %s"
% (a_desc, b_desc, lib._v_name, time_step, initial_nuc))
print("Mismatching elements sorted by error (%s, %s, symmetric relative error, absolute error):" % (a_desc, b_desc))
rel_error = abs(a - b)/(a + b)
abs_error = abs(a - b)
for i in mismatching_indices:
if b[i] == 0:
origen_val_missing.append((nucs[i].decode(),
lib._v_name, time_step, initial_nuc, b_desc))
if nucs[i].decode() == 'He4' and 'atom' in b_desc:
He4_atom_fraction.append((lib._v_name,
time_step, initial_nuc))
print("%s %s %s %s %s" % (nucs[i].decode(), a[i], b[i],
rel_error[i], abs_error[i]))
print()
print("Combinations where ORIGEN has a 0 value and CRAM does not:")
for vals in origen_val_missing:
print(', '.join(vals))
if not origen_val_missing:
print("None!")
print()
print("Combinations where He4 mismatches in atom fraction:")
for vals in He4_atom_fraction:
print(", ".join(vals))
if not He4_atom_fraction:
print("None!")
if not mismatching:
print("No mismatching values found!")
def analyze_nofission(*, run_all=False, file=None, title=True, thetas=None,
alphas=None, alpha0=None, nofission_data=os.path.join(os.path.dirname(__file__), 'tests',
'data', 'pwru50_400000000000000.0_nofission.npz'), degree=14):
try:
import scikits.umfpack
del scikits
except ImportError:
import traceback
traceback.print_exc()
sys.exit("scikit-umfpack is required to run the nofission analysis")
valid_time_names = TIME_STEPS.values() if run_all else ['1 day', '1 year', '1000 years', '1 million years']
nofission_transmutes = {t: {} for t in valid_time_names}
for time, time_name in sorted(TIME_STEPS.items()):
if time_name not in valid_time_names:
continue
if run_all:
for f in os.listdir('data'):
if f.endswith('_nofission.npz'):
lib = f.split('_', 1)[0]
data = os.path.join('data', f)
print("analyzing", data, 'on', time_name)
nofission_transmutes[time_name][lib] = run_transmute_test(data,
degree, 200, time, run_all=False, _print=True,
thetas=thetas, alphas=alphas, alpha0=alpha0)
else:
print("analyzing", nofission_data, 'on', time_name)
nofission_transmutes[time_name]['pwru50'] = run_transmute_test(nofission_data,
degree, 200, time, run_all=run_all, _print=True,
thetas=thetas, alphas=alphas,
alpha0=alpha0)
plot_nofission_transmutes(nofission_transmutes, run_all=run_all,
file=file, title=title)
return nofission_transmutes
def plot_nofission_transmutes(nofission_transmutes, *, run_all=False,
file=None, title=True):
setup_matplotlib_rc()
valid_time_names = TIME_STEPS.values() if run_all else ['1 day', '1 year', '1000 years', '1 million years']
for time, time_name in sorted(TIME_STEPS.items()):
if time_name not in valid_time_names:
continue
plt.switch_backend('pgf')
plt.clf()
fig, axes = plt.subplots(1, 4, sharey=True)
fig.set_size_inches(1.5*6.4, 1.5/4*4.8)
for lib in nofission_transmutes[time_name]:
            for (r, plot_title), ax in zip([
('scipy.sparse.linalg.expm', r'\texttt{scipy.\allowbreak{}sparse.\allowbreak{}linalg.\allowbreak{}expm}'),
('part_frac_complex UMFPACK', '\\texttt{CRAM (sympy.\\allowbreak{}lambdify}\nwith UMFPACK)'),
('part_frac_complex SuperLU', '\\texttt{CRAM (sympy.\\allowbreak{}lambdify}\nwith SuperLU)'),
('transmutagen generated C solver', 'CRAM (transmutagen\ngenerated C solver)'),
],
axes):
m = nofission_transmutes[time_name][lib][r]
                if m is not None and not isinstance(m, np.ndarray):
                    m = m.toarray()
                if m is None or np.isnan(m).any() or np.isinf(m).any():
print("Could not compute", r, "for", lib)
ax.text(0.5, 0.5, "Calculation resulted\nin NaNs",
va='center', ha='center', transform=ax.transAxes, size=12)
ax.set_xticks([])
else:
ax.hist(np.asarray(np.sum(m, axis=0) - 1).flatten())
ax.xaxis.offsetText.set_size(14)
# if title:
# fig.suptitle(time_name, y=1.08)
ax.set_yscale('log', nonposy='clip')
# # Matplotlib's default ticks aren't great. We include the
# # endpoints, and 0 if it isn't too close to the endpoints
# # (within 5%).
# xmin, xmax = ax.get_xlim()
# if xmin < 0 < xmax and abs(xmin) > 0.05*(xmax - xmin) < abs(xmax):
# locs = [xmin, 0, xmax]
# else:
# locs = [xmin, xmax]
# ax.set_xticks(locs)
#
# # Put "x 10^-19" on every x-axis tick
# ax.set_xticklabels([pretty_float(i) for i in locs])
ax.ticklabel_format(scilimits=(-3, 3), axis='x')
ax.minorticks_off()
locs = ax.get_yticks()
ax.set_ylim([0.5, 4000])
ax.set_yticklabels(["$%d$" % int(i) for i in locs])
                if title:
                    ax.set_title(plot_title)
# Only the last axis
fig.text(0.07, 0.5, "Count", ha='center', va='center', rotation='vertical')
fig.text(0.5, -0.3, r'$\sum_i \left (e^{-At}\right )_{i,j} - 1$', ha='center', va='center')
print(time_name)
# plt_show_in_terminal()
if file:
path, ext = os.path.splitext(file)
filename = '-'.join([path, lib, time_name.replace(' ', '-')]) + ext
print("Saving to", filename)
else:
filename = file
if filename:
plt.savefig(filename, bbox_inches='tight')
plt.close()
def pretty_float(i):
"""
This function is specifically for the xticks in the nofission graphs. It
    might not give appropriate representations for other contexts.
"""
if i == 0:
return r'$0^{\vphantom{0}}$'
float_exponent = np.floor(np.log10(abs(i)))
if -3 <= float_exponent <= 3:
return r"$%s^{\vphantom{0}}$" % str(i)[:6]
lead_digit, exponent = ("%.0e" % i).split('e')
return r"$%s\times 10^{%s}$" % (lead_digit, exponent)
def plot_matrix_sum_histogram(m, *, title='', axis=0, file=None):
plt.clf()
plt.hist(np.asarray(np.sum(m, axis=axis)).flatten())
plt.yscale('log', nonposy='clip')
if title:
plt.title(title)
plt_show_in_terminal()
if file:
plt.savefig(file)
plt.close()
def decay_matrix():
from .gensolve import pyne_decay_matrix, make_ijk
json_data = json.load(open(os.path.join(os.path.dirname(__file__), 'data', 'gensolve.json')))
fromto = json_data['fromto']
nucs = json_data['nucs']
N = len(nucs)
ijkeys = [(nucs.index(j), nucs.index(i)) for i, j in json_data['fromto']]
ij = {k: l for l, k in enumerate(sorted(ijkeys))}
ijk = make_ijk(ij, N)
ijnucs = {(nucs[i], nucs[j]): k for (i, j), k in ijk.items()}
return scipy.sparse.csr_matrix((pyne_decay_matrix(fromto, ijnucs),
([i for i, j in sorted(ij)], [j for i, j in sorted(ij)])))
def analyze_eigenvals(*, pwru50_data=None, file=None, title=True):
if not pwru50_data:
pwru50_data = os.path.join(os.path.dirname(__file__), 'tests', 'data', 'pwru50_400000000000000.0.npz')
nucs, matpwru50 = load_sparse_csr(pwru50_data)
matdecay = decay_matrix()
for desc, mat in {'pwru50': matpwru50, 'decay': matdecay}.items():
plt.clf()
print("analyzing eigenvalues of", desc)
eigvals, eigvects = scipy.sparse.linalg.eigen.eigs(mat, mat.shape[0]-2)
plt.scatter(np.real(eigvals), np.imag(eigvals))
plt.yscale('symlog', linthreshy=1e-20)
plt.xscale('symlog')
# plt.xlim([np.min(np.real(eigvals))*2, 1])
plt.ylim([np.min(np.imag(eigvals))*10, np.max(np.imag(eigvals))*10])
plt.xticks([-10**i for i in range(1, 1+int(np.ceil(np.log10(-plt.xlim()[0]))), 2)]
+ [0]
+ [10**i for i in range(1, int(np.log10(plt.xlim()[1])), 2)])
plt.yticks([-10**i for i in range(-19, int(np.log10(-plt.ylim()[0])), 2)]
+ [0]
+ [10**i for i in range(-19, int(np.log10(plt.ylim()[1])), 2)])
plt.minorticks_off()
if title:
plt.title("Eigenvalues of transmutation matrix for " + desc)
plt_show_in_terminal()
if file:
path, ext = os.path.splitext(file)
plt.savefig(path + '_' + desc + ext)
def analyze_cram_digits(max_degree=20):
print("Computing coefficients (or getting from cache)")
exprs = defaultdict(dict)
cram_coeffs = defaultdict(dict)
part_frac_coeffs = defaultdict(lambda: defaultdict(dict))
# {degree: {prec: {'p': [coeffs], 'q': [coeffs]}}}
correct_expr_digits = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
# {degree: {prec: {'thetas': [[real coeffs, ..., im coeffs]], 'alphas':
# [[real coeffs, ..., im coeffs]], 'alpha0', [[real coeff, im coeff]]}}}
correct_part_frac_digits = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for degree in range(1, max_degree+1):
print("Degree", degree)
for prec in range(100, 1100, 100):
print("Precision", prec)
exprs[degree][prec] = get_CRAM_from_cache(degree, prec, log=True, plot=True)
cram_coeffs[degree][prec] = CRAM_coeffs(exprs[degree][prec], prec)
thetas, alphas, alpha0 = thetas_alphas(exprs[degree][prec], prec)
t = sorted(thetas, key=im)
part_frac_coeffs[degree][prec]['thetas'] = [[re(i) for i in t], [im(i) for i in t]]
t = sorted(alphas, key=im)
part_frac_coeffs[degree][prec]['alphas'] = [[re(i) for i in t], [im(i) for i in t]]
part_frac_coeffs[degree][prec]['alpha0'] = [[re(alpha0)], [im(alpha0)]]
# Assume that 1000 has the most correct digits
coeffs1000 = cram_coeffs[degree][1000]
part_frac_coeffs1000 = part_frac_coeffs[degree][1000]
for prec in range(100, 1000, 100):
coeffs = cram_coeffs[degree][prec]
for l in 'pq':
for coeff, coeff1000 in zip(coeffs[l], coeffs1000[l]):
correct_expr_digits[degree][prec][l].append(len(os.path.commonprefix([coeff,
coeff1000])) - 1)
these_part_frac_coeffs = part_frac_coeffs[degree][prec]
for l in ['thetas', 'alphas', 'alpha0']:
for i in range(2):
for coeff, coeff1000 in zip(these_part_frac_coeffs[l][i], part_frac_coeffs1000[l][i]):
format_str = '{:.%se}' % (prec - 1)
coeff = format_str.format(Float(coeff, prec))
coeff1000 = format_str.format(Float(coeff1000, prec))
correct_part_frac_digits[degree][prec][l].append(len(os.path.commonprefix([coeff,
coeff1000])) - 1)
for typ, L, correct_digits in [('CRAM expression', 'pq', correct_expr_digits),
('Partial fraction', ['thetas', 'alphas', 'alpha0'], correct_part_frac_digits),]:
print("Correct digits for", typ)
# Plot minimum number of correct digits as a function of precision
plt.clf()
fig, ax = plt.subplots()
minvals = defaultdict(list)
for degree in range(1, max_degree+1):
print("Degree", degree)
for prec in range(100, 1000, 100):
print(" Precision", prec)
for l in L:
print(' ', end='')
print(l, end=' ')
for i in correct_digits[degree][prec][l]:
print(i, end=' ')
print()
minvals[degree].append(min(sum([correct_digits[degree][prec][i] for i in L], [])))
ax.plot(range(100, 1000, 100), minvals[degree], label=degree)
# TODO: Make window wider so the legend isn't chopped off
ax.legend(title=typ + " coefficients by degree", loc="upper left", bbox_to_anchor=(1,1))
plt.ylabel('Number of correct digits')
plt.xlabel('Precision')
plt_show_in_terminal()
# Plot minimum number of correct digits as a function of degree
plt.clf()
fig, ax = plt.subplots()
minvals = defaultdict(list)
for prec in range(100, 1000, 100):
for degree in range(1, max_degree+1):
minvals[prec].append(min(sum([correct_digits[degree][prec][i] for i in L], [])))
ax.plot(range(1, max_degree+1), minvals[prec], label=prec)
# TODO: Make window wider so the legend isn't chopped off
ax.legend(title=typ + " coefficients by precision", loc="upper left", bbox_to_anchor=(1,1))
plt.ylabel('Number of correct digits')
plt.xlabel('Degree')
ax.set_xticks(range(1, max_degree+1))
plt_show_in_terminal()
def _latex_typ(typ, idx):
typ_mapping = {'thetas': r'\theta', 'alphas': r'\alpha', 'alpha0':
r'\alpha_0'}
if typ != 'alpha0':
return '$' + typ_mapping[typ] + '_' + str(idx+1) + '$'
else:
return '$' + typ_mapping[typ] + '$'
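# Added illustration (not in the original): _latex_typ('thetas', 0) returns
# '$\theta_1$' and _latex_typ('alpha0', 0) returns '$\alpha_0$', matching the
# row labels written into the LaTeX tables in analyze_pusa_coeffs below.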
def analyze_pusa_coeffs(*, file=None, title=True, latex=False):
from .tests.pusa_coeffs import (part_frac_coeffs, plot_difference,
transmutagen_cram_error, paper_cram_error, get_paper_part_frac,
get_paper_thetas_alphas)
from .partialfrac import t
try:
import colorama
except ImportError:
raise ImportError("colorama is required to use diff_strs")
print("Differing coefficients:")
for degree in [14, 16]:
if latex:
path, ext = os.path.splitext(latex)
filename = '-'.join([path, str(degree)]) + ext
print("Saving LaTeX table to", filename)
f = open(filename, 'w')
print("Degree:", degree)
for typ in ['thetas', 'alphas', 'alpha0']:
for idx in range(degree//2) if typ != 'alpha0' else range(1):
if latex:
f.write(r'\multirow{2}{*}{%s} & ' % _latex_typ(typ, idx))
else:
print(typ, '-', idx, sep='', end=': ')
for real_imag in ['real', 'imag']:
expr = get_CRAM_from_cache(degree, 200)
thetas, alphas, alpha0 = thetas_alphas(expr, 200)
format_str = '{:+.19e}'
paper_coeffs = part_frac_coeffs[degree]
# Thetas and alphas in the paper are negative what we have, and are only
# counted once per conjugate.
if typ == 'thetas':
vals = [-i for i in thetas if im(-i) >= 0]
elif typ == 'alphas':
vals = [-j for i, j in zip(thetas, alphas) if im(-i) >= 0]
elif typ == 'alpha0':
vals = [alpha0]
val = sorted(vals, key=im)[idx]
real_val_paper, imag_val_paper = sorted(zip(paper_coeffs[typ]['real'],
paper_coeffs[typ]['imaginary']), key=lambda i: float(i[1]))[idx]
real_val, imag_val = val.as_real_imag()
if real_imag == 'real':
our_str, pusa_str = format_str.format(decimal.Decimal(repr(real_val))), real_val_paper
else:
our_str, pusa_str = format_str.format(decimal.Decimal(repr(imag_val))), imag_val_paper
if imag_val == 0:
# Decimal formats the string with e+19, which is
# undesirable. See
# https://bugs.python.org/issue31684.
our_str = "+0.0000000000000000000e+0"
machine_differences = (literal_eval(pusa_str) != literal_eval(our_str))
if latex:
latex_pusa_str = pusa_str
latex_our_str = our_str
# if machine_differences:
# latex_pusa_str = r'{\it %s}' % latex_pusa_str
# latex_our_str = r'{\it %s}' % latex_our_str
if real_imag == 'imag':
f.write(' & ')
latex_pusa_str = latex_pusa_str + 'j'
latex_our_str = latex_our_str + 'j'
# For whatever reason, \setulcolor only applies to the
# next \ul
if machine_differences:
f.write(r'\setulcolor{red}')
sep = ' & ' if not machine_differences else r' & \setulcolor{red}'
diff_strs(latex_pusa_str, latex_our_str, end=r'\\',
style='latex separated', sep=sep,
stop_chars='e', file=f)
if real_imag == 'imag' and typ != 'alpha0':
f.write(r'\cline{2-3}')
diff_strs(pusa_str, our_str, end=' ')
if machine_differences:
print(colorama.Back.RED, colorama.Fore.WHITE,
"<- Machine floats differ",
colorama.Style.RESET_ALL, sep='', end=' ')
print()
if latex:
f.close()
plt.ion()
plot_difference(file=file, all_plots=False)
part_fracs = {}
paper_part_fracs = {}
interval = (0, 100)
prec = 200
# {degree: {'pusa_alpha_errors': [...], 'transmutagen_alpha_errors': [...], 't0s': [...]}}
alpha_errors = defaultdict(lambda: defaultdict(list))
for degree in [14, 16]:
expr = get_CRAM_from_cache(degree, prec)
thetas, alphas, alpha0 = thetas_alphas(expr, prec)
part_frac = thetas_alphas_to_expr_complex(thetas, alphas, alpha0)
part_frac = part_frac.replace(customre, re)
paper_part_frac = get_paper_part_frac(degree).replace(customre, re)
paper_thetas, paper_alphas, paper_alpha0 = get_paper_thetas_alphas(degree)
part_fracs[degree] = part_frac
paper_part_fracs[degree] = paper_part_frac
print("Computing critical points for degree", degree)
critical_points = nsolve_intervals(diff(part_fracs[degree] - exp(-t), t),
interval, prec=prec)
print('-'*80)
print("Testing", len(critical_points), "points in", interval, "for degree", degree)
for t0 in critical_points:
print()
transmutagen_error = transmutagen_cram_error(degree, t0, prec)
pusa_error = paper_cram_error(degree, t0, prec)
expr = get_CRAM_from_cache(degree, prec)
thetas, alphas, alpha0 = thetas_alphas(expr, prec)
print('degree', degree, 'alpha0:\t\t%.20g' % alpha0)
transmutagen_alpha_error = abs(abs(transmutagen_error) - alpha0)
pusa_alpha_error = abs(abs(pusa_error) - paper_alpha0)
for name, error, alpha_error in [
("Our", transmutagen_error, transmutagen_alpha_error),
("Pusa", pusa_error, pusa_alpha_error)]:
print(name, "error near t=%.4f:\t%.20g" % (t0,
error))
alpha_error = abs(abs(error) - alpha0)
color = colorama.Fore.RED if alpha_error > 1e-20 else colorama.Fore.GREEN
print("Off by:", color, '\t\t\t%.5g' % alpha_error, colorama.Style.RESET_ALL)
alpha_error = abs(abs(error) - alpha0)
alpha_errors[degree]['t0s'].append(t0)
alpha_errors[degree]['transmutagen_alpha_errors'].append(transmutagen_alpha_error)
alpha_errors[degree]['pusa_alpha_errors'].append(pusa_alpha_error)
if transmutagen_alpha_error >= pusa_alpha_error:
print(colorama.Fore.RED, "Pusa error is better",
colorama.Style.RESET_ALL, sep='')
else:
print(colorama.Fore.GREEN, "Our error is better",
colorama.Style.RESET_ALL, sep='')
plt.clf()
for degree in [14, 16]:
plt.figure()
plt.plot(alpha_errors[degree]['t0s'],
alpha_errors[degree]['transmutagen_alpha_errors'], '.',
label=r'transmutagen')
plt.plot(alpha_errors[degree]['t0s'],
alpha_errors[degree]['pusa_alpha_errors'], '.',
label=r'Pusa~\cite{pusa2012correction}')
plt.title(r'degree %d' % degree)
plt.legend()
plt.xlabel('$t$')
plt.ylabel(r'$||e^{-t_0} - \hat{r}_{%d,%d}(t_0)| - \alpha_0|$' %
(degree, degree))
if file:
filename, ext = os.path.splitext(file)
filename += '-errors-' + str(degree)
plt.savefig(filename + ext)
# analyze_nofission(thetas=paper_thetas, alphas=paper_alphas,
# alpha0=paper_alpha0, file='test.pdf')
def analyze_gensolve(*, origen_json_file=None, json_file=None,
pairs_per_pass=1, runs=100, warm_up_runs=5, optimize=True):
from pyne import nucname
origen_json_data = json.load(origen_json_file or open(os.path.join(os.path.dirname(__file__), 'data', 'gensolve_origen.json')))
json_data = json.load(json_file or open(os.path.join(os.path.dirname(__file__), 'data', 'gensolve.json')))
# Make the nuclide list the same
new_nuclides = set(json_data['nucs']) - set(origen_json_data['nucs'])
removed_nuclides = set(origen_json_data['nucs']) - set(json_data['nucs'])
    for i in new_nuclides:
        origen_json_data['fromto'].append([i, i])
    for i in removed_nuclides:
        json_data['fromto'].append([i, i])
full_nucs = sorted(set(json_data['nucs']) | set(origen_json_data['nucs']), key=nucname.cinder)
json_data['nucs'] = origen_json_data['nucs'] = full_nucs
new_json = copy.deepcopy(origen_json_data)
new_fromtos = sorted(set(map(tuple, json_data['fromto'])) - set(map(tuple,
origen_json_data['fromto'])))
all_runtimes = []
added = [0]
print("Compiling 0/%d" % len(new_fromtos))
outfile = generate_gensolve_test(new_json, '0', flags=optimize)
print("Running 0/%d" % len(new_fromtos))
    runtimes = run_gensolve_test(outfile, warm_up_runs=warm_up_runs, runs=runs)
print("Run", 0, "took", np.mean(runtimes), "seconds on average")
all_runtimes.append(runtimes)
for i, fromto in enumerate(new_fromtos, 1):
new_json['fromto'].append(list(fromto))
if (len(new_fromtos) - i) % pairs_per_pass != 0:
continue
added.append(i)
print("Compiling %d/%d" % (i, len(new_fromtos)))
outfile = generate_gensolve_test(new_json, str(i), flags=optimize)
print("Running %d/%d" % (i, len(new_fromtos)))
runtimes = run_gensolve_test(outfile, warm_up_runs=warm_up_runs, runs=runs)
print("Run", i, "took", np.mean(runtimes), "seconds on average")
all_runtimes.append(runtimes)
plt.clf()
plt.plot(added, list(map(np.mean, all_runtimes)))
plt.fill_between(added, list(map(np.min, all_runtimes)), list(map(np.max,
all_runtimes)), alpha=0.5)
plt_show_in_terminal()
def generate_gensolve_test(json_data, tag, directory='gensolve-tests',
recompile=False, flags=True):
from transmutagen.gensolve import generate, GCC_COMPILER_FLAGS
tag += '_O0' if not flags else ''
outscript = os.path.join(directory, 'test_%s.o' % tag)
if not recompile and os.path.exists(outscript):
print("Already compiled")
return outscript
os.makedirs(directory, exist_ok=True)
sourcefile = os.path.join(directory, 'test_%s.c' % tag)
generate(json_data=json_data, py_solve=False, degrees=[14],
outfile=sourcefile, timing_test=True)
compiler_flags = GCC_COMPILER_FLAGS if flags else ['-O0']
args = ['gcc'] + compiler_flags + ['-o', outscript] + [sourcefile]
print(' '.join(args))
subprocess.run(args, check=True)
return outscript
def run_gensolve_test(outscript, warm_up_runs=5, runs=100):
TIMING_TEST_OUT = re_module.compile(r'Took (.*) seconds\nSum of resulting vector: (.*)\n')
runtimes = []
for i in range(warm_up_runs):
p = subprocess.run([outscript], check=True, stdout=subprocess.PIPE)
for i in range(runs):
p = subprocess.run([outscript], check=True, stdout=subprocess.PIPE)
m = TIMING_TEST_OUT.match(p.stdout.decode('utf-8'))
if not m:
raise ValueError("Gensolve command output not in the expected format: %s" % p.stdout)
runtime, vector_sum = map(literal_eval, m.groups())
runtimes.append(runtime)
        if vector_sum != 1:
raise ValueError("Gensolve vector sum not 1 (%s)" % m.group(2))
return runtimes
# Based on https://matplotlib.org/users/event_handling.html
class PlotLUMatrix:
def __init__(self, N, extra=(), *, include_diagonals=False, img_type='imshow',
scatter_settings=None, axes=None):
if img_type not in ['imshow', 'scatter']:
raise ValueError("img_type should be 'imshow' or 'scatter'")
if not axes:
fig = plt.figure()
axes = fig.add_subplot(111)
self.axes = axes
self.include_diagonals = include_diagonals
self.img_type = img_type
self.scatter_settings = scatter_settings or {}
self.extra = list(extra)
self.N = N
self._make_matrix_data()
self.press = None
self.adding = None
self.make_image()
def _make_matrix_data(self):
extra = self.extra
N = self.N
diags = [(i, i) for i in range(N)] if self.include_diagonals else []
ij = {i: j for j, i in enumerate(diags + extra)}
a = np.zeros((N, N), dtype=int)
for i, j in ij:
a[i, j] = 1
# Make sure the diagonals are included for this part
for j, i in enumerate(range(N), len(diags + extra)):
ij[(i, i)] = j
ijk = make_ijk(ij, N)
self.ijk = ijk
b = np.zeros((N, N), dtype=int)
for i, j in ijk:
b[i, j] = 1
data = a + b
self.data = data
def make_image(self):
from matplotlib.patches import Patch
axes = self.axes
if self.img_type == 'imshow':
img = axes.imshow(self.data)
patches = [
Patch(color=img.cmap(img.norm(0)),
label="zero value"),
Patch(color=img.cmap(img.norm(1)),
label="zero value that must be included for LU"),
Patch(color=img.cmap(img.norm(2)),
label="nonzero value"),
]
axes.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
self.image = img
elif self.img_type == 'scatter':
s = axes.scatter(scipy.sparse.coo_matrix(self.data == 1).col,
self.N - scipy.sparse.coo_matrix(self.data == 1).row,
label='zero value that must\nbe included for LU',
**self.scatter_settings)
s.axes.scatter(scipy.sparse.coo_matrix(self.data == 2).col,
self.N - scipy.sparse.coo_matrix(self.data == 2).row,
label='nonzero value', **self.scatter_settings)
axes.legend()
self.image = s
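# Illustrative usage sketch (added note, not part of the original module); the
# size and the extra (row, col) pairs below are made-up examples:
#
#   plot = PlotLUMatrix(10, extra=[(7, 1), (2, 5)], include_diagonals=True)
#   plt.show()   # cells colored: zero / zero required for LU fill-in / nonzero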
class InteractiveLUMatrix(PlotLUMatrix):
def __init__(self, N, *, extra=(), img_type='imshow'):
if img_type != 'imshow':
raise ValueError("InteractiveLUMatrix requires img_type='imshow'")
super().__init__(N, extra=extra, img_type='imshow')
self._connect()
def _update_image(self):
self.image.set_data(self.data)
self.image.figure.canvas.draw()
def _connect(self):
'connect to all the events we need'
self.cidpress = self.image.figure.canvas.mpl_connect(
'button_press_event', self.on_press)
self.cidrelease = self.image.figure.canvas.mpl_connect(
'button_release_event', self.on_release)
self.cidmotion = self.image.figure.canvas.mpl_connect(
'motion_notify_event', self.on_motion)
def _invert(self, event):
        # matplotlib gives (xdata, ydata); the matrix is indexed (row, col), so swap
x, y = int(event.ydata+0.5), int(event.xdata+0.5)
if (x, y) in self.extra:
# When dragging, only add or only remove points
if self.adding == False:
return
self.extra.remove((x, y))
self.adding = True
else:
if self.adding == True:
return
self.extra.append((x, y))
self.adding = False
def on_press(self, event):
self._invert(event)
self._make_matrix_data()
self._update_image()
self.press = event.xdata, event.ydata
def on_motion(self, event):
        'on motion, keep toggling points under the cursor while the button is held'
if self.press is None:
return
self._invert(event)
self._make_matrix_data()
self._update_image()
def on_release(self, event):
'on release we reset the press data'
self.press = None
self.adding = None
self._update_image()
def disconnect(self):
'disconnect all the stored connection ids'
self.image.figure.canvas.mpl_disconnect(self.cidpress)
self.image.figure.canvas.mpl_disconnect(self.cidrelease)
self.image.figure.canvas.mpl_disconnect(self.cidmotion)
def analyze_lusolve(*, N=100, interactive=False, json_file=None, file=None):
plt.clf()
if interactive:
plt.interactive(True)
I = InteractiveLUMatrix(N, include_diagonals=True)
plt.show(block=True)
I.disconnect()
else:
scatter_settings = dict(alpha=0.5, marker='.')
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
fig.set_size_inches(1.5*6.4, 1.5/2*4.8)
json_data = json.load(json_file or
open(os.path.join(os.path.dirname(__file__), 'data',
'gensolve.json')))
nucsid = sorted(json_data['nucs'], key=nucname.id)
ijkeysid = [(nucsid.index(j), nucsid.index(i)) for i, j in
json_data['fromto']]
print("Nonzero entries:", len(ijkeysid))
Iid = PlotLUMatrix(len(nucsid), extra=ijkeysid, img_type='scatter',
scatter_settings=scatter_settings, axes=ax1)
print("id IJK:", len(Iid.ijk), "(an additional",
len(Iid.ijk) - len(ijkeysid), "entries)")
nucscinder = sorted(json_data['nucs'], key=nucname.cinder)
ijkeyscinder = [(nucscinder.index(j), nucscinder.index(i)) for i, j in
json_data['fromto']]
Icinder = PlotLUMatrix(len(nucscinder), extra=ijkeyscinder,
img_type='scatter', scatter_settings=scatter_settings, axes=ax2)
print("Cinder IJK:", len(Icinder.ijk), "(an additional",
len(Icinder.ijk) - len(ijkeyscinder), "entries)")
print("1 - Cinder IJK/id IJK", 1 - len(Icinder.ijk)/len(Iid.ijk))
print("id IJK/Cinder IJK", len(Iid.ijk)/len(Icinder.ijk))
N = Iid.N
# TODO: Allow to pass this in as an option
ax1.axis([500, 1000, N - 1000, N - 490])
ax2.axis([500, 1000, N - 1000, N - 490])
if file:
print("Saving to", file)
plt.savefig(file, bbox_inches='tight')
def analyze_degrees(*, pwru50_data=None, file=None):
from matplotlib import cycler
if not pwru50_data:
pwru50_data = os.path.join(os.path.dirname(__file__), 'tests', 'data', 'pwru50_400000000000000.0.npz')
nucs, data = load_sparse_csr(pwru50_data)
ns = list(range(30, 2, -2)) # [30, 28, ..., 4]
print("Computing CRAM functions")
fs = []
for n in ns + [ns[-1] - 2]:
print(n)
fs.append(CRAM_matrix_exp_lambdify(n, 200))
b = initial_vector("U235", nucs)
print("Computing the exponentials of the matrices")
xs = {}
diffs = {}
for t in TIME_STEPS:
xs[t] = [f(-data*t, b) for f in fs]
diffs[t] = list(zip(xs[t][:-1], xs[t][1:]))
plt.clf()
default_cycler = plt.rcParams['axes.prop_cycle']
plt.axes().set_prop_cycle(default_cycler + cycler('linestyle', ['-', '--',
':', '-.', '-', '--', ':', '-.', '-', '--']))
for t in sorted(TIME_STEPS):
plt.plot(ns, [np.max(np.abs(a - b)) for a, b in diffs[t]],
label=TIME_STEPS[t])
plt.legend()
plt.xticks(ns)
plt.yscale('log')
plt.ylabel(r"$\mathrm{max}(|\hat{r}_{n,n}(-At)b - \hat{r}_{n-2,n-2}(-At)b|)$")
plt.xlabel(r"$n$")
if file:
plt.savefig(file)
plt_show_in_terminal()
def analyze():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--file', help="""File name to save the plot(s) to.
For --eigenvals, a filename like "eigenvals.pdf" will be saved as
"eigenvals_pwru50.pdf" and "eigenvals_decay.pdf".
For --nofission, a filename like "nofission.pdf" will be saved as
"nofission-pwru50-1-day.pdf", "nofission-pwru50-1-year.pdf",
"nofission-pwru50-1000-years.pdf", and
"nofission-pwru50-1-million-years.pdf".
If not provided the plot is not saved.""")
parser.add_argument('--no-title', action='store_false', dest='title',
help="""Don't add a title to plots""")
parser.add_argument('--pwru50-data', help="""Path to pwru50 data file to
analyze. The default is the data that is in the
transmutagen/tests/data directory. Used for the --eigenvals and
--degrees analyses.""", default=os.path.join(os.path.dirname(__file__), 'tests',
'data', 'pwru50_400000000000000.0.npz'))
origen = parser.add_argument_group('origen')
origen.add_argument('--origen', action='store_true', dest='origen',
help="""Run the origen analysis.""")
origen.add_argument('--origen-results', default='data/results.hdf5',
help="""HDF5 file for the results of the ORIGEN runs.""")
nofission = parser.add_argument_group('nofission')
nofission.add_argument('--nofission', action='store_true',
dest='nofission', help="""Run the nofission analysis.""")
nofission.add_argument('--run-all', action='store_true', help="""Run the
nofission analysis on all the nofission data in the data/ directory
against all time steps and with all solvers. The default is to run the
analysis on the pwru50 data in the transmutagen/tests directory and
only against 1 day, 1 year, 1000 years, and 1 million years,
against the generated C solver, part_frac_complex, and
scipy.sparse.linalg.expm.""")
nofission.add_argument('--nofission-data',
default=os.path.join(os.path.dirname(__file__), 'tests',
'data', 'pwru50_400000000000000.0_nofission.npz'), help="""Data file to use
for nofission analysis (ignored when --run-all is passed). The default
is %(default)s.""")
nofission.add_argument('--degree', default=14, type=int, help="""CRAM
degree to do the nofission analysis with.""")
eigenvals = parser.add_argument_group('eigenvals')
eigenvals.add_argument('--eigenvals', action='store_true',
dest='eigenvals', help="""Run the eigenvalue analysis.""")
cram_digits = parser.add_argument_group('cram-digits')
cram_digits.add_argument('--cram-digits', action='store_true', help="""Analyze
accuracy of CRAM digits. WARNING: If cache values have not been
precomputed, this will take a long time (> 1 day) to compute.""")
cram_digits.add_argument('--max-degree', type=int, help="""Max degree for
--cram-digits. Default is 20.""", default=20)
pusa_coeffs = parser.add_argument_group('Pusa coefficients')
pusa_coeffs.add_argument('--pusa-coeffs', action='store_true',
help="""Analyze the coefficients from the Maria Pusa paper "Correction to
Partial Fraction Decomposition Coefficients for Chebyshev Rational
Approximation on the Negative Real Axis".""")
pusa_coeffs.add_argument('--latex', help="""Save LaTeX table to the given
file. A filename like pusa-table.tex will result in pusa-table-14.tex and
pusa-table-16.tex.""")
gensolve = parser.add_argument_group("Gensolve")
gensolve.add_argument('--gensolve', action='store_true', help="""Run
gensolve timing analysis.""")
gensolve.add_argument('--pairs-per-pass', help="""Number of from-to pairs
to add on each pass. The default is %(default)s.""", default=1, type=int)
gensolve.add_argument('--warm-up-runs', help="""Number of times to run the
command first without recording the output. The default is
%(default)s.""", default=5, type=int)
gensolve.add_argument('--runs', help="""Number of times to run the
command. The default is %(default)s.""", default=100, type=int)
gensolve.add_argument('--no-optimize', help="""Don't add optimizing
compiler flags. The default is to add them.""", action='store_false',
dest='optimize', default=True)
lusolve = parser.add_argument_group("LUSolve")
lusolve.add_argument('--lusolve', action='store_true', help="""Run
LU solve analysis.""")
lusolve.add_argument('--interactive', action='store_true', help="""Interactive analysis. The default is a noninteractive analysis of the
ORIGEN sparsity pattern.""")
lusolve.add_argument('--N', help="""Size of the matrix when using --interactive. The default is %(default)s""", default=100, type=int)
degrees = parser.add_argument_group("Degrees")
degrees.add_argument('--degrees', action='store_true', help="""Run degrees analysis.""")
try:
import argcomplete
argcomplete.autocomplete(parser)
except ImportError:
pass
args = parser.parse_args()
setup_matplotlib_rc()
if args.origen:
analyze_origen(args.origen_results, file=args.file,
title=args.title)
if args.nofission:
analyze_nofission(run_all=args.run_all, file=args.file,
title=args.title, nofission_data=args.nofission_data,
degree=args.degree)
if args.eigenvals:
analyze_eigenvals(pwru50_data=args.pwru50_data,
file=args.file, title=args.title)
if args.cram_digits:
analyze_cram_digits(args.max_degree)
if args.pusa_coeffs:
analyze_pusa_coeffs(file=args.file, title=args.title, latex=args.latex)
if args.gensolve:
analyze_gensolve(pairs_per_pass=args.pairs_per_pass, runs=args.runs,
warm_up_runs=args.warm_up_runs, optimize=args.optimize)
if args.lusolve:
analyze_lusolve(N=args.N, interactive=args.interactive, file=args.file)
if args.degrees:
analyze_degrees(pwru50_data=args.pwru50_data, file=args.file)
if __name__ == '__main__':
analyze()
|
the-stack_106_16603
|
import numpy as np
import logging
import sys
import matplotlib.pyplot as plt
logging.basicConfig(level=logging.INFO)
ca_file = "ca_red.vec"
es_file = "es_red.vec"
def file2wordsNmatrix(file):
logging.info(f"> Processing file {file}")
words = []
matrix = []
with open(file) as fd:
next(fd) # Skip first line
for line in fd:
try:
line = line.rstrip().split(" ")
word = line[0]
values = line[1:]
values = [float(v) for v in values]
words.append(word)
matrix.append(values)
            except Exception as e:
                logging.warning(f"Couldn't read line: {e}")
matrix = np.array(matrix)
logging.info(f"< Processed file {file}")
return words, matrix
ca_words, ca_matrix = file2wordsNmatrix(ca_file)
es_words, es_matrix = file2wordsNmatrix(es_file)
def word2encoding(words, matrix):
d = {}
for (w, e) in zip(words, matrix):
d[w] = e
return d
word2encoding_ca = word2encoding(ca_words, ca_matrix)
word2encoding_es = word2encoding(es_words, es_matrix)
from collections import defaultdict
def words2wordsBuckets(words):
d = defaultdict(lambda: [])
for w in words:
first_letter = w[0]
d[first_letter].append(w)
return d
sorted_ca = words2wordsBuckets(ca_words)
sorted_es = words2wordsBuckets(es_words)
import epitran
epi_ca = epitran.Epitran("cat-Latn")
epi_es = epitran.Epitran("spa-Latn")
def words2phonetics(words, epi, lang):
logging.info(f"> Phonetics of {lang}")
d = dict()
for w in words:
d[w] = epi.xsampa_list(w)
logging.info(f"< Phonetics of {lang} done")
return d
word2phonetics_ca = words2phonetics(ca_words, epi_ca, "CA")
word2phonetics_es = words2phonetics(es_words, epi_es, "ES")
flatten = lambda l: [item for sublist in l for item in sublist]
phonemes_ca = set(flatten([phonemes for _, phonemes in word2phonetics_ca.items()]))
phonemes_es = set(flatten([phonemes for _, phonemes in word2phonetics_es.items()]))
all_phonemes = phonemes_ca.union(phonemes_es)
print(all_phonemes)
words = [
("francia", "frança"),
("zona", "zona"),
("externos", "externs"),
("encuentra", "troba"),
("telenovela", "esportius"),
("desplazamiento", "desplaçament"),
("sindicato", "sessions"),
("trío", "grup"),
("blanco", "negre"),
("calor", "fred"),
]
from strsimpy.normalized_levenshtein import NormalizedLevenshtein
from scipy.spatial import distance
from strsimpy.weighted_levenshtein import WeightedLevenshtein
def substitution_cost(char_a, char_b):
# consonants = [
# "l",
# "x",
# # "i",
# # "a",
# "r",
# "ts",
# "l:",
# "L",
# "j",
# "m:",
# "s",
# "g",
# "tS",
# "b:",
# "n:",
# "Z",
# # "u",
# "g:",
# "dZ",
# "d:",
# "w",
# "z",
# "p:",
# "d",
# "n",
# "f",
# "m",
# "p",
# # "O",
# # "o",
# "S",
# # "E",
# "4",
# "tK",
# "b",
# "j\\",
# "dz",
# # "@",
# "N",
# "L:",
# "k",
# "J",
# "t",
# # "e",
# ]
vowels = ["a", "e", "E", "i", "o", "O", "u", "@"]
vowels_graph = [
"@i",
"@u",
"@e",
"@E",
"@a",
"@O",
"@o",
"ie",
"uo",
"eE",
"oO",
"Ea",
"aO",
]
if char_a == char_b:
return 0
a_is_vowel = char_a in vowels
b_is_vowel = char_b in vowels
# Two vowels
if a_is_vowel and b_is_vowel:
return (
0.5
if (char_a + char_b in vowels_graph or char_b + char_a in vowels_graph)
else 1
)
# Two consonants
elif not a_is_vowel and not b_is_vowel:
return 1
else:
return 2
def weighted_distance(w1, w2):
weighted_levenshtein = WeightedLevenshtein(
substitution_cost_fn=substitution_cost,
insertion_cost_fn=lambda c: 1,
deletion_cost_fn=lambda c: 1,
)
return weighted_levenshtein.distance(w1, w2)
def lexical_similarity(w1, w2):
normalized_levenshtein = NormalizedLevenshtein()
return normalized_levenshtein.similarity(w1, w2)
def process_pair(t):
w1, w2, e1, e2, p1, p2 = t
l = lexical_similarity(w1, w2)
e = distance.euclidean(e1, e2)
c = distance.cosine(e1, e2)
pd = lexical_similarity(p1, p2)
wd = weighted_distance(p1, p2)
return [w1, w2, l, e, c, pd, wd]
import pickle
import os.path
from joblib import Parallel, delayed
if os.path.isfile("./weights"):
print("Loading file")
with open("weights", "rb") as fd:
weights = pickle.load(fd)
print("File loaded")
else:
arg_instances = [
(
w1,
w2,
word2encoding_ca[w1],
word2encoding_es[w2],
word2phonetics_ca[w1],
word2phonetics_es[w2],
)
for w1 in ca_words
for w2 in sorted_es[w1[0]]
]
weights = Parallel(n_jobs=-1, verbose=1)(map(delayed(process_pair), arg_instances))
weights = np.array(weights)
with open("weights", "wb") as fd:
pickle.dump(weights, fd)
if False:
w1s = weights[:, 0]
w2s = weights[:, 1]
ls = [float(l) for l in weights[:, 2]]
es = [float(e) for e in weights[:, 3]]
cs = [1 - float(c) for c in weights[:, 4]]
# PHONETIC1 = [float(t) for t in weights[:, 5]]
PHONETIC2 = [float(t) for t in weights[:, 6]]
Y = np.linspace(1, 0, num=11)
X = np.linspace(0, 1, num=11)
pesos = np.zeros((len(Y), len(X)), dtype=np.int32)
for iY, y in enumerate(Y):
for iX, x in enumerate(X):
total = 0
for w1, w2, l, e, c, f2 in zip(w1s, w2s, ls, es, cs, PHONETIC2):
if l > y and c < x and f2 < 5:
total += 1
pesos[iY][iX] = total
print("Done ", iY, iX)
print(pesos)
for f in pesos:
for c in f:
print(c, end=" ")
print()
W1 = weights[:, 0]
W2 = weights[:, 1]
LEXICAL = [float(t) for t in weights[:, 2]]
EUCL = [float(t) for t in weights[:, 3]]
COSINE = [1 - float(t) for t in weights[:, 4]]
PHONETIC1 = [float(t) for t in weights[:, 5]]
PHONETIC2 = [float(t) for t in weights[:, 6]]
x = COSINE
y = PHONETIC2
X_LABEL = "Cosine similarity"
Y_LABEL = "Phonetic normalized similarity"
if False:
from scipy import stats
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
    print("The r-squared value is", r_value ** 2)
centro_x = sum(x) / len(x)
centro_y = sum(y) / len(y)
    print("The centers are", centro_x, centro_y)
heatmap, xedges, yedges = np.histogram2d(x, y, bins=150)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.imshow(heatmap.T, extent=extent, origin="lower")
plt.xlabel(X_LABEL)
plt.ylabel(Y_LABEL)
plt.show()
n, bins, patches = plt.hist(x=x, bins=100, color="#0504aa", alpha=0.7, rwidth=0.85)
plt.xlabel(X_LABEL)
plt.show()
n, bins, patches = plt.hist(x=y, bins=100, color="#0504aa", alpha=0.7, rwidth=0.85)
plt.xlabel(Y_LABEL)
plt.show()
if False:
# An "interface" to matplotlib.axes.Axes.hist() method
n, bins, patches = plt.hist(
x=weights, bins="auto", color="#0504aa", alpha=0.7, rwidth=0.85
)
plt.grid(axis="y", alpha=0.75)
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.title("My Very Own Histogram")
maxfreq = n.max()
# Set a clean upper y-axis limit.
plt.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)
plt.show()
print("done")
# List
for w1, w2, l, e, c, pd, wd in zip(W1, W2, LEXICAL, EUCL, COSINE, PHONETIC1, PHONETIC2):
if l > 0.8 and c < 0.5 and wd > 5:
print(w1, w2)
|
the-stack_106_16604
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import, division, print_function
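# Added note (not from the original script): the causal LM loss evaluated below
# compares the logits at position t with the token at position t + 1, i.e.
#
#     shift_logits = lm_logits[..., :-1, :]   # predictions for positions 0..n-2
#     shift_labels = labels[..., 1:]          # targets are the following tokens
#     loss = CrossEntropyLoss(ignore_index=-1)(
#         shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
#
# which is exactly what evaluate() does with the ONNX Runtime outputs.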
import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
import onnx
import onnxruntime as ort
from torch.nn import CrossEntropyLoss
from tqdm import tqdm, trange
from transformers import (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer)
}
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_path='train', block_size=512):
assert os.path.isfile(file_path)
directory, filename = os.path.split(file_path)
if not os.path.exists("./dataset_cached"):
os.makedirs("./dataset_cached")
cached_features_file = os.path.join("./dataset_cached",
args.model_name_or_path + '_cached_lm_' + str(block_size) + '_' + filename)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
with open(cached_features_file, 'rb') as handle:
self.examples = pickle.load(handle)
else:
logger.info("Creating features from dataset file at %s", directory)
self.examples = []
with open(file_path, encoding="utf-8") as f:
text = f.read()
tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
for i in range(0, len(tokenized_text)-block_size+1, block_size): # Truncate in block of block_size
self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i:i+block_size]))
            # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
            # If your dataset is small, first you should look for a bigger one :-) and second you
            # can change this behavior by adding (model specific) padding.
logger.info("Saving features into cached file %s", cached_features_file)
with open(cached_features_file, 'wb') as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
return torch.tensor(self.examples[item])
def load_and_cache_examples(args, tokenizer, evaluate=False):
dataset = TextDataset(tokenizer, args, file_path=args.eval_data_file, block_size=args.block_size)
return dataset
def evaluate(args, model, tokenizer, prefix=""):
eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
import timeit
total_time = 0.0
options = ort.SessionOptions()
session = ort.InferenceSession(model.SerializeToString(), options)
len_outputs = len(session.get_outputs())
len_inputs = len(session.get_inputs())
inputs_names = [session.get_inputs()[i].name for i in range(len_inputs)]
ort_inputs = {}
for idx, batch in enumerate(tqdm(eval_dataloader, desc="Evaluating")):
if nb_eval_steps >= args.warmup_steps:
start = timeit.default_timer()
inputs, labels = (batch, batch)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
for i in range(len_inputs):
inputs = np.array(inputs)
inputs = np.expand_dims(inputs, axis=0)
ort_inputs.update({inputs_names[i]: inputs})
predictions = session.run(None, ort_inputs)
lm_logits = predictions[0]
lm_logits = torch.from_numpy(lm_logits)
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-1)
lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
if nb_eval_steps >= args.warmup_steps:
total_time += (timeit.default_timer() - start)
eval_loss += lm_loss.mean().item()
nb_eval_steps += 1
if args.iter > 0 and nb_eval_steps > (args.warmup_steps + args.iter):
break
if nb_eval_steps >= args.warmup_steps:
perf = (nb_eval_steps - args.warmup_steps) * args.eval_batch_size / total_time
if args.eval_batch_size == 1:
print('Latency: %.3f ms' % (total_time / (nb_eval_steps - args.warmup_steps) * 1000))
print("Throughput: {} samples/s".format(perf))
else:
logger.info("*****no performance, please check dataset length and warmup number *****")
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
result = {
"perplexity": perplexity
}
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
if args.benchmark and args.mode == "accuracy":
print("Batch size = %d" % args.eval_batch_size)
print("Accuracy: %.5f" % (100 - result['perplexity']))
return 100 - result['perplexity']
def main():
parser = argparse.ArgumentParser()
## Required parameters
    parser.add_argument('--model_path', type=str, required=True,
                        help='Pre-trained GPT-2 model ONNX file.')
parser.add_argument("--eval_data_file", type=str, required=True,
help="Input evaluation data file to evaluate the perplexity on (a text file).")
## Other parameters
parser.add_argument("--model_type", type=str,
help="The model architecture to be fine-tuned.")
parser.add_argument("--model_name_or_path", type=str,
help="The model checkpoint for weights initialization.")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--tune', action='store_true', default=False,
                        help='Tune the GPT-2 model for quantization with LPOT.')
parser.add_argument('--config',type=str,
help='Tuning config file path')
parser.add_argument('--output_model',type=str, default='gpt2_tune.onnx',
help='output model path and name')
parser.add_argument('--benchmark',action='store_true', default=False,
help='Get benchmark performance of quantized model.')
parser.add_argument('--mode', type=str,
help="benchmark mode of performance or accuracy")
parser.add_argument("--warmup_steps", default=10, type=int,
help="Linear warmup over warmup_steps.")
    parser.add_argument('-i', "--iter", default=0, type=int,
                        help='For accuracy measurement only.')
    parser.add_argument("--no_cuda", action='store_true', default=False,
                        help="Avoid using CUDA even when it is available "
                             "(checked below when selecting the device).")
    args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path,
do_lower_case=False,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.block_size <= 0:
args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
logger.info("Training/evaluation parameters %s", args)
model = onnx.load(args.model_path)
ds = load_and_cache_examples(args, tokenizer, evaluate=True)
def eval_func(model):
return evaluate(args, model, tokenizer)
if args.benchmark:
evaluate(args, model, tokenizer)
if args.tune:
# GPT2 optimizer
from onnxruntime.transformers import optimizer
from onnxruntime.transformers.onnx_model_bert import BertOptimizationOptions
opt_options = BertOptimizationOptions('gpt2')
opt_options.enable_embed_layer_norm = False
model_optimizer = optimizer.optimize_model(
args.model_path,
'gpt2',
num_heads=12,
hidden_size=768,
optimization_options=opt_options)
model = model_optimizer.model
from lpot.experimental import Quantization, common
quantize = Quantization(args.config)
quantize.model = common.Model(model)
quantize.calib_dataloader = common.DataLoader(ds, batch_size=args.per_gpu_eval_batch_size)
quantize.eval_func = eval_func
q_model = quantize()
q_model.save(args.output_model)
if __name__ == "__main__":
main()
|
the-stack_106_16608
|
import os
import torch
import math
import numpy as np
from scipy.spatial.transform import Rotation as R
from math import radians
from PIL import Image, ImageFilter
from dataset.data_utils import process_viewpoint_label, TransLightning, resize_pad, random_crop
import torchvision.transforms as transforms
from model.resnet import resnet50
from model.vp_estimator import BaselineEstimator, Estimator
#img_name = "../test_img/HICO_train2015_00000001.jpg"
img_name = "../test_img/Screenshot 2022-01-26 120213.png"
#img_name = "../test_img/simple-knife.jpg"
im = Image.open(img_name).convert('RGB')
im_pos = im.copy()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
im_transform = transforms.Compose([transforms.ToTensor(), normalize])
#im = im.crop((left, upper, right, lower))
im_flip = im.transpose(Image.FLIP_LEFT_RIGHT)
im = resize_pad(im, 224)
im_flip = resize_pad(im_flip, 224)
im = im_transform(im)
im_flip = im_transform(im_flip)
im = im_flip
# ================CREATE NETWORK============================ #
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
net_feat = resnet50(num_classes=128)
net_vp = BaselineEstimator(img_feature_dim=2048)
net_feat.cuda()
net_vp.cuda()
state = torch.load("../exps/PoseContrast_ObjectNet3d_ZeroShot/ckpt.pth")
net_feat.load_state_dict(state['net_feat'])
net_vp.load_state_dict(state['net_vp'])
net_feat.eval()
net_vp.eval()
im = im[None, :].cuda()
feat, _ = net_feat(im)
out = net_vp(feat)
vp_pred, score = net_vp.compute_vp_pred(out, True)
print(vp_pred)
print(score)
azi, ele, inp = vp_pred[0]
print(azi, ele, inp)
ele = ele - 90
rol = inp - 180
print(azi, ele, rol)
azi = radians(azi)
ele = radians(ele)
rol = radians(rol)
r = 3
loc_y = r * math.cos(ele) * math.cos(azi)
loc_x = r * math.cos(ele) * math.sin(azi)
loc_z = r * math.sin(ele)
print(loc_x, loc_y, loc_z + 0.5)
distance = np.sqrt(loc_x ** 2 + loc_y ** 2 + loc_z ** 2)
rotate_val = rol
rotate_axes = (loc_x / distance, loc_y / distance, loc_z / distance)
r = R.from_quat([rol, loc_x / distance, loc_y / distance, loc_z / distance])
print("..............")
up = np.array([0, 1, 0])
front = np.array([1, 0, 0])
print(r.apply(up))
print(r.apply(front))
#print(quat * up)
#print(quat * front)
|
the-stack_106_16611
|
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        # Kadane's algorithm: rangeSum is the best sum of a subarray ending at
        # the current index, globalSum is the best sum seen so far.
        rangeSum = -float("inf")
        globalSum = -float("inf")
        for i, num in enumerate(nums):
            # Either extend the running subarray (when its sum is positive) or restart at num.
            rangeSum = max(rangeSum, 0) + num
            globalSum = max(globalSum, rangeSum)
        return globalSum
|
the-stack_106_16612
|
#!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate COINBASE_MATURITY (CB) more blocks to ensure the coinbases are mature.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in block CB + 3.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on block CB + 4.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on block CB + 5.
"""
import time
from test_framework.blocktools import (
COINBASE_MATURITY,
NORMAL_GBT_REQUEST_PARAMS,
add_witness_commitment,
create_block,
create_transaction,
)
from test_framework.messages import CTransaction
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
NULLDUMMY_ERROR = "non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if len(newscript) == 0:
assert len(i) == 0
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
class NULLDUMMYTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
self.extra_args = [[
f'-testactivationheight=segwit@{COINBASE_MATURITY + 5}',
'-addresstype=legacy',
'-par=1', # Use only one script thread to get the exact reject reason for testing
]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[0].get_wallet_rpc('wmulti')
w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
self.address = w0.getnewaddress()
self.pubkey = w0.getaddressinfo(self.address)['pubkey']
self.ms_address = wmulti.addmultisigaddress(1, [self.pubkey])['address']
self.wit_address = w0.getnewaddress(address_type='p2sh-segwit')
self.wit_ms_address = wmulti.addmultisigaddress(1, [self.pubkey], '', 'p2sh-segwit')['address']
if not self.options.descriptors:
# Legacy wallets need to import these so that they are watched by the wallet. This is unnecessary (and does not need to be tested) for descriptor wallets
wmulti.importaddress(self.ms_address)
wmulti.importaddress(self.wit_ms_address)
self.coinbase_blocks = self.generate(self.nodes[0], 2) # block height = 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.generate(self.nodes[0], COINBASE_MATURITY) # block height = COINBASE_MATURITY + 2
self.lastblockhash = self.nodes[0].getbestblockhash()
self.lastblockheight = COINBASE_MATURITY + 2
self.lastblocktime = int(time.time()) + self.lastblockheight
self.log.info(f"Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [{COINBASE_MATURITY + 3}]")
test1txs = [create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, amount=49)]
txid1 = self.nodes[0].sendrawtransaction(test1txs[0].serialize_with_witness().hex(), 0)
test1txs.append(create_transaction(self.nodes[0], txid1, self.ms_address, amount=48))
txid2 = self.nodes[0].sendrawtransaction(test1txs[1].serialize_with_witness().hex(), 0)
test1txs.append(create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, amount=49))
txid3 = self.nodes[0].sendrawtransaction(test1txs[2].serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], test1txs, accept=True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = create_transaction(self.nodes[0], txid2, self.ms_address, amount=47)
trueDummy(test2tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0)
self.log.info(f"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [{COINBASE_MATURITY + 4}]")
self.block_submit(self.nodes[0], [test2tx], accept=True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = create_transaction(self.nodes[0], test2tx.hash, self.address, amount=46)
test6txs = [CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], [test4tx], accept=False)
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = create_transaction(self.nodes[0], txid3, self.wit_address, amount=48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], [test5tx], with_witness=True, accept=False)
self.log.info(f"Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [{COINBASE_MATURITY + 5}]")
for i in test6txs:
self.nodes[0].sendrawtransaction(i.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], test6txs, with_witness=True, accept=True)
def block_submit(self, node, txs, *, with_witness=False, accept):
tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
assert_equal(tmpl['previousblockhash'], self.lastblockhash)
assert_equal(tmpl['height'], self.lastblockheight + 1)
block = create_block(tmpl=tmpl, ntime=self.lastblocktime + 1)
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
if with_witness:
add_witness_commitment(block)
block.solve()
assert_equal(None if accept else NULLDUMMY_ERROR, node.submitblock(block.serialize().hex()))
if accept:
assert_equal(node.getbestblockhash(), block.hash)
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
|
the-stack_106_16613
|
import numpy as np
import matplotlib.pyplot as pt
import pickle as pk
np.set_printoptions(precision=4, linewidth=200)
# g = np.arctanh
# rho = .99
g = lambda x: x
rho = 1
def run_trial(N, P, rho, verbose = False):
X = np.random.choice([-1,1], (N,P)) * rho
Y = np.random.choice([-1,1], (N,P)) * rho
W = np.zeros((N,N))
means = []
T = P
for t in range(T):
W += (g(Y[:,[t]]) - W.dot(X[:,[t]])) * X[:,[t]].T / (N * rho**2)
means.append( np.mean(g(Y[:,:t+1]) * W.dot(X[:,:t+1])) )
if verbose: print("t=%d: ~%f" % (t, means[-1]))
return means
if __name__ == "__main__":
N, P = 50, 400
means = run_trial(N, P, rho, verbose = True)
pt.plot(means)
pt.show()
|
the-stack_106_16614
|
from robot.api.parsing import ModelTransformer, Token
try:
from robot.api.parsing import InlineIfHeader
except ImportError:
InlineIfHeader = None
from robotidy.disablers import skip_section_if_disabled
from robotidy.utils import ROBOT_VERSION
EOL = Token(Token.EOL)
CONTINUATION = Token(Token.CONTINUATION)
class SplitTooLongLine(ModelTransformer):
"""
Split too long lines.
If any line in keyword call exceeds given length limit (120 by default) it will be
split:
Keyword With Longer Name ${arg1} ${arg2} ${arg3} # let's assume that arg2 is at 120 char
To:
# let's assume that arg2 is at 120 char
Keyword With Longer Name
... ${arg1}
... ${arg2}
... ${arg3}
Allowed line length is configurable using global parameter ``--line-length``:
robotidy --line-length 140 src.robot
Or using dedicated for this transformer parameter ``line_length``:
robotidy --configure SplitTooLongLine:line_length:140 src.robot
Using ``split_on_every_arg`` flag (``True`` by default), you can force the formatter to fill arguments in one line
until character limit:
Keyword With Longer Name ${arg1}
... ${arg2} ${arg3}
Supports global formatting params: ``spacecount``, ``separator``, ``--startline`` and ``--endline``.
See https://robotidy.readthedocs.io/en/latest/transformers/SplitTooLongLine.html for more examples.
"""
def __init__(self, line_length: int = None, split_on_every_arg: bool = True):
super().__init__()
self._line_length = line_length
self.split_on_every_arg = split_on_every_arg
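    # Rough sketch (added; not part of the original transformer) of how a split
    # call is assembled from tokens in split_keyword_call() below:
    #
    #     [indent, keyword, EOL]                       # head line
    #     [indent, CONTINUATION, separator, arg, EOL]  # one per argument -> "...    ${arg}"
    #
    # so the Robot Framework continuation marker "..." carries each argument
    # (or each batch of arguments, when split_on_every_arg is False) onto its own line.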
@property
def line_length(self):
return self.formatting_config.line_length if self._line_length is None else self._line_length
@skip_section_if_disabled
def visit_Section(self, node): # noqa
return self.generic_visit(node)
def visit_If(self, node): # noqa
if self.is_inline(node):
return node
if node.orelse:
self.generic_visit(node.orelse)
return self.generic_visit(node)
@staticmethod
def is_inline(node):
return ROBOT_VERSION.major > 4 and isinstance(node.header, InlineIfHeader)
def visit_KeywordCall(self, node): # noqa
if all(line[-1].end_col_offset < self.line_length for line in node.lines):
return node
if self.disablers.is_node_disabled(node, full_match=False):
return node
return self.split_keyword_call(node)
@staticmethod
def join_on_separator(tokens, separator):
for token in tokens:
yield token
yield separator
@staticmethod
def split_to_multiple_lines(tokens, indent, separator):
first = True
for token in tokens:
yield indent
if not first:
yield CONTINUATION
yield separator
yield token
yield EOL
first = False
def split_keyword_call(self, node):
separator = Token(Token.SEPARATOR, self.formatting_config.separator)
indent = node.tokens[0]
split_every_arg = self.split_on_every_arg
keyword = node.get_token(Token.KEYWORD)
line = [indent, *self.join_on_separator(node.get_tokens(Token.ASSIGN), separator), keyword]
if not self.col_fit_in_line(line):
            split_every_arg = True
head = [
*self.split_to_multiple_lines(node.get_tokens(Token.ASSIGN), indent=indent, separator=separator),
indent,
CONTINUATION,
separator,
keyword,
]
line = []
else:
head = []
comments = []
# Comments with separators inside them are split into
# [COMMENT, SEPARATOR, COMMENT] tokens in the AST, so in order to preserve the
# original comment, we need a lookback on the separator tokens.
last_separator = None
rest = node.tokens[node.tokens.index(keyword) + 1 :]
for token in rest:
if token.type == Token.SEPARATOR:
last_separator = token
elif token.type in {Token.EOL, Token.CONTINUATION}:
continue
elif token.type == Token.COMMENT:
# AST splits comments with separators, e.g.
#
# "# Comment rest" -> ["# Comment", " ", "rest"].
#
# Notice the third value not starting with a hash - that's what this
# condition is about:
if not str(token).startswith("#"):
# -2 because -1 is the EOL
comments[-2].value += last_separator.value + token.value
else:
comments += [indent, token, EOL]
elif token.type == Token.ARGUMENT:
if token.value == "":
token.value = "${EMPTY}"
                if split_every_arg or not self.col_fit_in_line(line + [separator, token]):
line.append(EOL)
head += line
line = [indent, CONTINUATION, separator, token]
else:
line += [separator, token]
# last line
line.append(EOL)
head += line
node.tokens = comments + head
return node
def col_fit_in_line(self, tokens):
return self.len_token_text(tokens) < self.line_length
@staticmethod
def len_token_text(tokens):
return sum(len(token.value) for token in tokens)
|
the-stack_106_16615
|
import click
import progressbar
from ..models import ImageModel
from ..imagebutler import db
@click.group()
def image():
pass
@image.command('gen_thumbnail')
@click.option('--type')
def thumbnail_regen(**kwargs):
    """Generate thumbnails for all|missing images. For this first implementation
    we do not care about performance; that task is saved for later."""
if kwargs['type'] in ('all', 'missing'):
images_progressed = 0
if kwargs['type'] == 'all':
images_count = ImageModel.query.count()
images = ImageModel.query.all()
else:
images_count = ImageModel.query.filter(
ImageModel.file_thumbnail.is_(None)
).count()
images = ImageModel.query.filter(
ImageModel.file_thumbnail.is_(None)
).all()
bar = progressbar.ProgressBar(max_value=images_count)
for processing_image in images:
processing_image.file_thumbnail = processing_image.gen_thumbnail()
db.session.commit()
images_progressed += 1
bar.update(images_progressed)
else:
click.echo('Usage: flask image gen_thumbnail --type all|missing',
err=True)
|
the-stack_106_16616
|
# encoding: utf-8
"""Tests for io.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from __future__ import absolute_import
import io as stdlib_io
import os.path
import stat
import sys
from subprocess import Popen, PIPE
import unittest
import nose.tools as nt
from IPython.testing.decorators import skipif, skip_win32
from IPython.utils.io import Tee, capture_output
from IPython.utils.py3compat import doctest_refactor_print, PY3
from IPython.utils.tempdir import TemporaryDirectory
if PY3:
from io import StringIO
else:
from StringIO import StringIO
def test_tee_simple():
"Very simple check with stdout only"
chan = StringIO()
text = 'Hello'
tee = Tee(chan, channel='stdout')
print(text, file=chan)
nt.assert_equal(chan.getvalue(), text+"\n")
class TeeTestCase(unittest.TestCase):
def tchan(self, channel, check='close'):
trap = StringIO()
chan = StringIO()
text = 'Hello'
std_ori = getattr(sys, channel)
setattr(sys, channel, trap)
tee = Tee(chan, channel=channel)
print(text, end='', file=chan)
setattr(sys, channel, std_ori)
trap_val = trap.getvalue()
nt.assert_equal(chan.getvalue(), text)
if check=='close':
tee.close()
else:
del tee
def test(self):
for chan in ['stdout', 'stderr']:
for check in ['close', 'del']:
self.tchan(chan, check)
def test_io_init():
"""Test that io.stdin/out/err exist at startup"""
for name in ('stdin', 'stdout', 'stderr'):
cmd = doctest_refactor_print("from IPython.utils import io;print io.%s.__class__"%name)
p = Popen([sys.executable, '-c', cmd],
stdout=PIPE)
p.wait()
classname = p.stdout.read().strip().decode('ascii')
# __class__ is a reference to the class object in Python 3, so we can't
# just test for string equality.
assert 'IPython.utils.io.IOStream' in classname, classname
def test_capture_output():
"""capture_output() context works"""
with capture_output() as io:
print('hi, stdout')
print('hi, stderr', file=sys.stderr)
nt.assert_equal(io.stdout, 'hi, stdout\n')
nt.assert_equal(io.stderr, 'hi, stderr\n')
|
the-stack_106_16618
|
import setuptools
with open("project-description.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setuptools.setup(
name="pyserini",
version="0.16.0",
author="Jimmy Lin",
author_email="[email protected]",
description="A Python toolkit for reproducible information retrieval research with sparse and dense representations",
long_description=long_description,
long_description_content_type="text/markdown",
package_data={"pyserini": [
"resources/jars/anserini-0.14.1-fatjar.jar",
]},
url="https://github.com/castorini/pyserini",
install_requires=requirements,
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
the-stack_106_16619
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Multi-channel CSC
=================
This example demonstrates solving a convolutional sparse coding problem with a greyscale dictionary and a colour signal :cite:`wohlberg-2017-sporco`
$$\mathrm{argmin}_\mathbf{x} \; (1/2) \sum_c \left\| \sum_m \mathbf{d}_m * \mathbf{x}_{c,m} - \mathbf{s}_c \right\|_2^2 + \lambda \sum_c \sum_m \| \mathbf{x}_{c,m} \|_1 + \mu \| \{ \mathbf{x}_{c,m} \} \|_{2,1} \;,$$
where $\mathbf{d}_{m}$ is the $m^{\text{th}}$ dictionary filter, $\mathbf{x}_{c,m}$ is the coefficient map corresponding to the $m^{\text{th}}$ dictionary filter and channel $c$ of the input image, and $\mathbf{s}_c$ is channel $c$ of the input image.
"""
from __future__ import print_function
from builtins import input
from builtins import range
import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40
import numpy as np
from sporco import util
from sporco import plot
import sporco.metric as sm
from sporco.admm import cbpdn
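# Added note (not part of the original example): the joint sparsity term
# ||{x_{c,m}}||_{2,1} in the docstring above is an l2 norm over each group of
# coefficients (here the colour channels of a given filter at a given spatial
# position) followed by an l1 sum over the groups, which encourages the
# channels to share active dictionary filters. As a rough NumPy sketch,
# assuming a coefficient array X whose channel axis is `axis_c`:
#
#     l21 = np.sum(np.sqrt(np.sum(X**2, axis=axis_c)))
#
# The exact axis layout used by the solver is described by b.cri below.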
"""
Load example image.
"""
img = util.ExampleImages().image('kodim23.png', scaled=True,
idxexp=np.s_[160:416,60:316])
"""
Highpass filter example image.
"""
npd = 16
fltlmbd = 10
sl, sh = util.tikhonov_filter(img, fltlmbd, npd)
"""
Load greyscale dictionary and display it.
"""
D = util.convdicts()['G:8x8x64']
plot.imview(util.tiledict(D), fgsz=(7, 7))
"""
Set :class:`.admm.cbpdn.ConvBPDNJoint` solver options.
"""
lmbda = 1e-1
mu = 1e-2
opt = cbpdn.ConvBPDNJoint.Options({'Verbose': True, 'MaxMainIter': 200,
'RelStopTol': 5e-3, 'AuxVarObj': False})
"""
Initialise and run CSC solver.
"""
b = cbpdn.ConvBPDNJoint(D, sh, lmbda, mu, opt, dimK=0)
X = b.solve()
print("ConvBPDNJoint solve time: %.2fs" % b.timer.elapsed('solve'))
"""
Reconstruct image from sparse representation.
"""
shr = b.reconstruct().squeeze()
imgr = sl + shr
print("Reconstruction PSNR: %.2fdB\n" % sm.psnr(img, imgr))
"""
Display low pass component and sum of absolute values of coefficient maps of highpass component.
"""
gamma = lambda x, g: x**g
fig = plot.figure(figsize=(14, 7))
plot.subplot(1, 2, 1)
plot.imview(sl, title='Lowpass component', fig=fig)
plot.subplot(1, 2, 2)
plot.imview(gamma(np.sum(abs(X), axis=b.cri.axisM).squeeze(), 0.4),
title='Sparse representation', fig=fig)
fig.show()
"""
Display original and reconstructed images.
"""
fig = plot.figure(figsize=(14, 7))
plot.subplot(1, 2, 1)
plot.imview(img, title='Original', fig=fig)
plot.subplot(1, 2, 2)
plot.imview(imgr, title='Reconstructed', fig=fig)
fig.show()
"""
Get iterations statistics from solver object and plot functional value, ADMM primary and dual residuals, and automatically adjusted ADMM penalty parameter against the iteration number.
"""
its = b.getitstat()
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.plot(its.ObjFun, fig=fig, xlbl='Iterations', ylbl='Functional')
plot.subplot(1, 3, 2)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, fig=fig,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'])
plot.subplot(1, 3, 3)
plot.plot(its.Rho, fig=fig, xlbl='Iterations', ylbl='Penalty Parameter')
fig.show()
# Wait for enter on keyboard
input()
|
the-stack_106_16620
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from wildcard.openstack.common.gettextutils import _ # noqa
from wildcard.openstack.common import importutils
from wildcard.openstack.common import jsonutils
from wildcard.openstack.common import local
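# Illustrative usage sketch (added for clarity; not part of the original
# module). getLogger()/setup() are defined below, and the context/instance
# keyword arguments are the ones consumed by ContextAdapter.process():
#
#     from wildcard.openstack.common import log as logging
#
#     LOG = logging.getLogger(__name__)
#     logging.setup('myservice')   # configure handlers/levels from CONF
#     LOG.info('instance booted', context=ctx, instance_uuid=uuid)
#
# where ctx and uuid are supplied by the caller (hypothetical names here).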
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config',
metavar='PATH',
help='If this option is specified, the logging configuration '
'file specified is used and overrides any other logging '
'options specified. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user)s %(tenant)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'eventlet.wsgi.server=WARN'
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
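# Example configuration snippet (added; values are hypothetical, option names
# are the ones registered above) showing how the logging and formatting options
# can be set through an oslo.config file:
#
#     [DEFAULT]
#     debug = True
#     log_file = myservice.log
#     log_dir = /var/log/myservice
#     logging_context_format_string = %(asctime)s %(levelname)s %(name)s [%(request_id)s] %(instance)s%(message)s
#     logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d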
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid', None) or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
extra.update({"project": self.project})
extra.update({"version": self.version})
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [itertools.ifilter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config):
try:
logging.config.fileConfig(log_config)
except moves.configparser.Error as exc:
raise LogConfigError(log_config, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config:
_load_log_config(CONF.log_config)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
def _setup_logging_from_conf():
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not CONF.log_file:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"wildcard.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
|
the-stack_106_16622
|
import mmcv
import numpy as np
import torch
from torch.utils.data import Dataset
from .builder import DATASETS
def create_real_pyramid(real, min_size, max_size, scale_factor_init):
"""Create image pyramid.
This function is modified from the official implementation:
https://github.com/tamarott/SinGAN/blob/master/SinGAN/functions.py#L221
In this implementation, we adopt the rescaling function from MMCV.
Args:
real (np.array): The real image array.
min_size (int): The minimum size for the image pyramid.
max_size (int): The maximum size for the image pyramid.
scale_factor_init (float): The initial scale factor.
"""
    num_scales = int(
        np.ceil(
            # np.log has no base argument; use the change-of-base formula to
            # take the logarithm with respect to scale_factor_init.
            np.log(np.power(min_size / min(real.shape[0], real.shape[1]), 1)) /
            np.log(scale_factor_init))) + 1
    scale2stop = int(
        np.ceil(
            np.log(
                min([max_size, max([real.shape[0], real.shape[1]])]) /
                max([real.shape[0], real.shape[1]])) /
            np.log(scale_factor_init)))
stop_scale = num_scales - scale2stop
scale1 = min(max_size / max([real.shape[0], real.shape[1]]), 1)
real_max = mmcv.imrescale(real, scale1)
scale_factor = np.power(
min_size / (min(real_max.shape[0], real_max.shape[1])),
1 / (stop_scale))
    scale2stop = int(
        np.ceil(
            np.log(
                min([max_size, max([real.shape[0], real.shape[1]])]) /
                max([real.shape[0], real.shape[1]])) /
            np.log(scale_factor_init)))
stop_scale = num_scales - scale2stop
reals = []
for i in range(stop_scale + 1):
scale = np.power(scale_factor, stop_scale - i)
curr_real = mmcv.imrescale(real, scale)
reals.append(curr_real)
return reals, scale_factor, stop_scale
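# Illustrative usage sketch (comment only, not part of the original file). The
# image path and pyramid parameters are assumptions chosen for demonstration;
# the pyramid is ordered coarse-to-fine, so reals[-1] is the finest scale.
#   real = mmcv.imread('demo.jpg')  # hypothetical HxWx3 image
#   reals, scale_factor, stop_scale = create_real_pyramid(
#       real, min_size=25, max_size=250, scale_factor_init=0.75)
#   assert len(reals) == stop_scale + 1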
@DATASETS.register_module()
class SinGANDataset(Dataset):
"""SinGAN Dataset.
In this dataset, we create an image pyramid and save it in the cache.
Args:
img_path (str): Path to the single image file.
min_size (int): Min size of the image pyramid. Here, the number will be
set to the ``min(H, W)``.
max_size (int): Max size of the image pyramid. Here, the number will be
set to the ``max(H, W)``.
scale_factor_init (float): Rescale factor. Note that the actual factor
we use may be a little bit different from this value.
num_samples (int, optional): The number of samples (length) in this
dataset. Defaults to -1.
"""
def __init__(self,
img_path,
min_size,
max_size,
scale_factor_init,
num_samples=-1):
self.img_path = img_path
assert mmcv.is_filepath(self.img_path)
self.load_annotations(min_size, max_size, scale_factor_init)
self.num_samples = num_samples
def load_annotations(self, min_size, max_size, scale_factor_init):
"""Load annatations for SinGAN Dataset.
Args:
min_size (int): The minimum size for the image pyramid.
max_size (int): The maximum size for the image pyramid.
scale_factor_init (float): The initial scale factor.
"""
real = mmcv.imread(self.img_path)
self.reals, self.scale_factor, self.stop_scale = create_real_pyramid(
real, min_size, max_size, scale_factor_init)
self.data_dict = {}
for i, real in enumerate(self.reals):
self.data_dict[f'real_scale{i}'] = self._img2tensor(real)
self.data_dict['input_sample'] = torch.zeros_like(
self.data_dict['real_scale0'])
def _img2tensor(self, img):
img = torch.from_numpy(img).to(torch.float32).permute(2, 0,
1).contiguous()
img = (img / 255 - 0.5) * 2
return img
def __getitem__(self, index):
return self.data_dict
def __len__(self):
return int(1e6) if self.num_samples < 0 else self.num_samples
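# Illustrative usage sketch (comment only, not part of the original file). The
# values below are assumptions; in practice the dataset is normally built via
# the mmcv config/registry machinery rather than instantiated directly.
#   dataset = SinGANDataset(img_path='demo.jpg', min_size=25, max_size=250,
#                           scale_factor_init=0.75, num_samples=1000)
#   sample = dataset[0]  # dict with 'real_scale0', ..., 'input_sample'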
|
the-stack_106_16623
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "c4/policyengine/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
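# Worked example (comment only, not part of the original versioneer code),
# assuming tag_prefix == "v" and a describe output of "v1.2.0-5-gabc1234-dirty":
#   pieces["dirty"]       -> True
#   pieces["closest-tag"] -> "1.2.0"
#   pieces["distance"]    -> 5
#   pieces["short"]       -> "abc1234"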
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
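# Worked examples (comment only, not part of the original versioneer code) for
# render_pep440 above, using hypothetical `pieces` dicts:
#   {"closest-tag": "1.2.0", "distance": 0, "dirty": False, "short": "abc1234"} -> "1.2.0"
#   {"closest-tag": "1.2.0", "distance": 5, "dirty": True, "short": "abc1234"}  -> "1.2.0+5.gabc1234.dirty"
#   {"closest-tag": None, "distance": 7, "dirty": False, "short": "abc1234"}    -> "0+untagged.7.gabc1234"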
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
    # --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
the-stack_106_16625
|
"""Launchpad custom external dependencies."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# Sanitize a dependency so that it works correctly from code that includes
# Launchpad as a submodule.
def clean_dep(dep):
return str(Label(dep))
def get_python_path(ctx):
path = ctx.os.environ.get("PYTHON_BIN_PATH")
if not path:
fail(
"Could not get environment variable PYTHON_BIN_PATH. " +
"Check your .bazelrc file.",
)
return path
def _find_tf_include_path(repo_ctx):
exec_result = repo_ctx.execute(
[
get_python_path(repo_ctx),
"-c",
"import tensorflow as tf; import sys; " +
"sys.stdout.write(tf.sysconfig.get_include())",
],
quiet = True,
)
if exec_result.return_code != 0:
fail("Could not locate tensorflow installation path:\n{}"
.format(exec_result.stderr))
return exec_result.stdout.splitlines()[-1]
def _find_tf_lib_path(repo_ctx):
exec_result = repo_ctx.execute(
[
get_python_path(repo_ctx),
"-c",
"import tensorflow as tf; import sys; " +
"sys.stdout.write(tf.sysconfig.get_lib())",
],
quiet = True,
)
if exec_result.return_code != 0:
fail("Could not locate tensorflow installation path:\n{}"
.format(exec_result.stderr))
return exec_result.stdout.splitlines()[-1]
def _find_numpy_include_path(repo_ctx):
exec_result = repo_ctx.execute(
[
get_python_path(repo_ctx),
"-c",
"import numpy; import sys; " +
"sys.stdout.write(numpy.get_include())",
],
quiet = True,
)
if exec_result.return_code != 0:
fail("Could not locate numpy includes path:\n{}"
.format(exec_result.stderr))
return exec_result.stdout.splitlines()[-1]
def _find_python_include_path(repo_ctx):
exec_result = repo_ctx.execute(
[
get_python_path(repo_ctx),
"-c",
"from distutils import sysconfig; import sys; " +
"sys.stdout.write(sysconfig.get_python_inc())",
],
quiet = True,
)
if exec_result.return_code != 0:
fail("Could not locate python includes path:\n{}"
.format(exec_result.stderr))
return exec_result.stdout.splitlines()[-1]
def _find_python_solib_path(repo_ctx):
exec_result = repo_ctx.execute(
[
get_python_path(repo_ctx),
"-c",
"import sys; vi = sys.version_info; " +
"sys.stdout.write('python{}.{}'.format(vi.major, vi.minor))",
],
)
if exec_result.return_code != 0:
fail("Could not locate python shared library path:\n{}"
.format(exec_result.stderr))
version = exec_result.stdout.splitlines()[-1]
basename = "lib{}.so".format(version)
exec_result = repo_ctx.execute(
["{}-config".format(version), "--configdir"],
quiet = True,
)
if exec_result.return_code != 0:
fail("Could not locate python shared library path:\n{}"
.format(exec_result.stderr))
solib_dir = exec_result.stdout.splitlines()[-1]
full_path = repo_ctx.path("{}/{}".format(solib_dir, basename))
if not full_path.exists:
fail("Unable to find python shared library file:\n{}/{}"
.format(solib_dir, basename))
return struct(basename = basename, dir = solib_dir)
def _eigen_archive_repo_impl(repo_ctx):
tf_include_path = _find_tf_include_path(repo_ctx)
repo_ctx.symlink(tf_include_path, "tf_includes")
repo_ctx.file(
"BUILD",
content = """
cc_library(
name = "includes",
hdrs = glob(["tf_includes/Eigen/**/*.h",
"tf_includes/Eigen/**",
"tf_includes/unsupported/Eigen/**/*.h",
"tf_includes/unsupported/Eigen/**"]),
# https://groups.google.com/forum/#!topic/bazel-discuss/HyyuuqTxKok
includes = ["tf_includes"],
visibility = ["//visibility:public"],
)
""",
executable = False,
)
def _nsync_includes_repo_impl(repo_ctx):
tf_include_path = _find_tf_include_path(repo_ctx)
repo_ctx.symlink(tf_include_path + "/external", "nsync_includes")
repo_ctx.file(
"BUILD",
content = """
cc_library(
name = "includes",
hdrs = glob(["nsync_includes/nsync/public/*.h"]),
includes = ["nsync_includes"],
visibility = ["//visibility:public"],
)
""",
executable = False,
)
def _zlib_includes_repo_impl(repo_ctx):
tf_include_path = _find_tf_include_path(repo_ctx)
repo_ctx.symlink(
tf_include_path + "/external/zlib",
"zlib",
)
repo_ctx.file(
"BUILD",
content = """
cc_library(
name = "includes",
hdrs = glob(["zlib/**/*.h"]),
includes = ["zlib"],
visibility = ["//visibility:public"],
)
""",
executable = False,
)
def _snappy_includes_repo_impl(repo_ctx):
tf_include_path = _find_tf_include_path(repo_ctx)
repo_ctx.symlink(
tf_include_path + "/external/snappy",
"snappy",
)
repo_ctx.file(
"BUILD",
content = """
cc_library(
name = "includes",
hdrs = glob(["snappy/*.h"]),
includes = ["snappy"],
visibility = ["//visibility:public"],
)
""",
executable = False,
)
def _protobuf_includes_repo_impl(repo_ctx):
tf_include_path = _find_tf_include_path(repo_ctx)
repo_ctx.symlink(tf_include_path, "tf_includes")
repo_ctx.symlink(Label("//third_party:protobuf.BUILD"), "BUILD")
def _tensorflow_includes_repo_impl(repo_ctx):
tf_include_path = _find_tf_include_path(repo_ctx)
repo_ctx.symlink(tf_include_path, "tensorflow_includes")
repo_ctx.file(
"BUILD",
content = """
cc_library(
name = "includes",
hdrs = glob(
[
"tensorflow_includes/**/*.h",
"tensorflow_includes/third_party/eigen3/**",
],
exclude = ["tensorflow_includes/absl/**/*.h"],
),
includes = ["tensorflow_includes"],
deps = [
"@eigen_archive//:includes",
"@protobuf_archive//:includes",
"@zlib_includes//:includes",
"@snappy_includes//:includes",
],
visibility = ["//visibility:public"],
)
filegroup(
name = "protos",
srcs = glob(["tensorflow_includes/**/*.proto"]),
visibility = ["//visibility:public"],
)
""",
executable = False,
)
def _tensorflow_solib_repo_impl(repo_ctx):
tf_lib_path = _find_tf_lib_path(repo_ctx)
repo_ctx.symlink(tf_lib_path, "tensorflow_solib")
repo_ctx.file(
"BUILD",
content = """
cc_library(
name = "framework_lib",
srcs = ["tensorflow_solib/libtensorflow_framework.so.2"],
deps = ["@python_includes", "@python_includes//:numpy_includes"],
visibility = ["//visibility:public"],
)
""",
)
def _python_includes_repo_impl(repo_ctx):
python_include_path = _find_python_include_path(repo_ctx)
python_solib = _find_python_solib_path(repo_ctx)
repo_ctx.symlink(python_include_path, "python_includes")
numpy_include_path = _find_numpy_include_path(repo_ctx)
repo_ctx.symlink(numpy_include_path, "numpy_includes")
repo_ctx.symlink(
"{}/{}".format(python_solib.dir, python_solib.basename),
python_solib.basename,
)
# Note, "@python_includes" is a misnomer since we include the
# libpythonX.Y.so in the srcs, so we can get access to python's various
# symbols at link time.
repo_ctx.file(
"BUILD",
content = """
cc_library(
name = "python_includes",
hdrs = glob(["python_includes/**/*.h"]),
srcs = ["{}"],
includes = ["python_includes"],
visibility = ["//visibility:public"],
)
cc_library(
name = "numpy_includes",
hdrs = glob(["numpy_includes/**/*.h"]),
includes = ["numpy_includes"],
visibility = ["//visibility:public"],
)
""".format(python_solib.basename),
executable = False,
)
def cc_tf_configure():
"""Autoconf pre-installed tensorflow repo."""
make_eigen_repo = repository_rule(implementation = _eigen_archive_repo_impl)
make_eigen_repo(name = "eigen_archive")
make_nsync_repo = repository_rule(
implementation = _nsync_includes_repo_impl,
)
make_nsync_repo(name = "nsync_includes")
make_zlib_repo = repository_rule(
implementation = _zlib_includes_repo_impl,
)
make_zlib_repo(name = "zlib_includes")
make_snappy_repo = repository_rule(
implementation = _snappy_includes_repo_impl,
)
make_snappy_repo(name = "snappy_includes")
make_protobuf_repo = repository_rule(
implementation = _protobuf_includes_repo_impl,
)
make_protobuf_repo(name = "protobuf_archive")
make_tfinc_repo = repository_rule(
implementation = _tensorflow_includes_repo_impl,
)
make_tfinc_repo(name = "tensorflow_includes")
make_tflib_repo = repository_rule(
implementation = _tensorflow_solib_repo_impl,
)
make_tflib_repo(name = "tensorflow_solib")
make_python_inc_repo = repository_rule(
implementation = _python_includes_repo_impl,
)
make_python_inc_repo(name = "python_includes")
def lp_python_deps():
http_archive(
name = "pybind11",
build_file = clean_dep("//third_party:pybind11.BUILD"),
sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d",
strip_prefix = "pybind11-2.4.3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/pybind/pybind11/archive/v2.4.3.tar.gz",
"https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz",
],
)
http_archive(
name = "absl_py",
sha256 = "603febc9b95a8f2979a7bdb77d2f5e4d9b30d4e0d59579f88eba67d4e4cc5462",
strip_prefix = "abseil-py-pypi-v0.9.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
"https://github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
],
)
def _lp_protoc_archive(ctx):
version = ctx.attr.version
sha256 = ctx.attr.sha256
override_version = ctx.os.environ.get("LP_PROTOC_VERSION")
if override_version:
sha256 = ""
version = override_version
urls = [
"https://github.com/protocolbuffers/protobuf/releases/download/v%s/protoc-%s-linux-x86_64.zip" % (version, version),
]
ctx.download_and_extract(
url = urls,
sha256 = sha256,
)
ctx.file(
"BUILD",
content = """
filegroup(
name = "protoc_bin",
srcs = ["bin/protoc"],
visibility = ["//visibility:public"],
)
""",
executable = False,
)
lp_protoc_archive = repository_rule(
attrs = {
"version": attr.string(mandatory = True),
"sha256": attr.string(mandatory = True),
},
implementation = _lp_protoc_archive,
)
def lp_protoc_deps(version, sha256):
lp_protoc_archive(name = "protobuf_protoc", sha256 = sha256, version = version)
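# Illustrative WORKSPACE usage (comment only, not part of the original file; the
# .bzl load path, protoc version and sha256 below are placeholders/assumptions):
#   load("//third_party:external_deps.bzl", "cc_tf_configure", "lp_python_deps", "lp_protoc_deps")
#   cc_tf_configure()   # sets up @tensorflow_includes, @tensorflow_solib, @python_includes, ...
#   lp_python_deps()    # pybind11 and absl_py archives
#   lp_protoc_deps(version = "3.13.0", sha256 = "<sha256 of the protoc release zip>")
# The repository rules above read PYTHON_BIN_PATH from the environment, so a
# .bazelrc entry along the lines of
#   build --action_env=PYTHON_BIN_PATH=/usr/bin/python3
# is expected (exact flag usage is an assumption).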
|
the-stack_106_16627
|
#transmute: benzodiazepine tool
import sqlite3
import tkinter
from tkinter import ttk
class Database(object):
def __init__(self, filename, table):
self.connection = sqlite3.connect(filename)
with self.connection:
self.connection.row_factory = sqlite3.Row
self.cursor = self.connection.cursor()
self.cursor.execute("SELECT * FROM %s" % table)
self.contents = self.cursor.fetchall()
self.cursor.execute("SELECT Name FROM %s" % table)
self.drugs = self.cursor.fetchall()
def get_drug(self, query):
for drug in self.contents:
if query in drug:
return drug
def drug_dictionary(self):
dump = []
dictionary = {}
for drug in self.drugs:
dump.append(drug)
for i, row in enumerate(dump):
n = []
for col in range(0, len(row)):
n.append(row[col])
dictionary[i] = n
return dictionary
class TransmuteBenzodiazepines(ttk.Frame):
def __init__(self, parent):
ttk.Frame.__init__(self, parent)
self.parent = parent
self.benzodiazepines = Database("drugs.sqlite", "BENZODIAZEPINES")
self.assemble_interface()
def assemble_interface(self):
self.parent.title("Transmute: Benzodiazepines")
self.style = ttk.Style()
self.style.theme_use("default")
self.draw_listboxes(14, self.benzodiazepines.drug_dictionary().values())
self.pack(fill=tkinter.BOTH, expand=1, side=tkinter.LEFT)
self.draw_entry_box()
self.draw_from_field("benzodiazepine")
self.draw_result_label()
self.draw_to_field("benzodiazepine")
frame = ttk.Frame(self, relief=tkinter.FLAT, borderwidth=1)
frame.pack(fill=tkinter.BOTH, expand=1)
self.draw_buttons()
def draw_listboxes(self, height, values):
from_box = tkinter.Listbox(self, height=height)
to_box = tkinter.Listbox(self, height=height)
for drug in sorted(values):
drug_string = str(drug)
drug_string = drug_string.replace("'","").replace("[","").replace("]","")
from_box.insert(tkinter.END, drug_string)
to_box.insert(tkinter.END, drug_string)
from_box.bind("<<ListboxSelect>>", self.clicked_from)
from_box.pack(side=tkinter.LEFT, padx=5, pady=3)
to_box.bind("<<ListboxSelect>>", self.clicked_to)
to_box.pack(side=tkinter.LEFT)
def draw_entry_box(self):
self.dose_entry = ttk.Entry(self, width=35)
self.dose_entry.pack(side=tkinter.TOP, anchor=tkinter.W, padx=5, pady=3)
def draw_result_label(self):
self.result_given = tkinter.StringVar()
self.result_given.set("?")
self.result_label = ttk.Label(self, anchor=tkinter.W, textvariable=self.result_given, width=35, relief=tkinter.RAISED, background="white")
self.result_label.pack(side=tkinter.TOP, anchor=tkinter.W, padx=5, pady=3)
def draw_from_field(self, drug_type):
self.convert_from = tkinter.StringVar()
self.convert_from.set("mg of one "+drug_type+" roughly equals:")
self.label_from = ttk.Label(self, anchor=tkinter.W, textvariable=self.convert_from, width=35)
self.label_from.pack(side=tkinter.TOP, anchor=tkinter.W, padx=5, pady=3)
def draw_to_field(self, drug_type):
self.convert_to = tkinter.StringVar()
self.convert_to.set("mg of another "+drug_type+".")
self.label_to = ttk.Label(self, anchor=tkinter.W, textvariable=self.convert_to)
self.label_to.pack(side=tkinter.TOP, anchor=tkinter.W, padx=5, pady=3)
def draw_buttons(self):
convert_button = ttk.Button(self, text="Convert", command = self.convert)
convert_button.pack(side=tkinter.LEFT, padx=5, pady=3)
exit_button = ttk.Button(self, text="Exit", command = self.kill)
exit_button.pack(side=tkinter.LEFT, padx=5, pady=3)
def convert(self):
db_from = self.benzodiazepines.get_drug(from_drug)
db_to = self.benzodiazepines.get_drug(to_drug)
dose = self.dose_entry.get()
try:
multiplier = float(db_from["CF"])/float(db_to["CF"])
result = float(dose) * multiplier
self.result_given.set(result)
except:
self.result_given.set("Enter a numeric dose above.")
def clicked_from(self, val):
sender = val.widget
idx = sender.curselection()
global from_drug
from_drug = sender.get(idx)
self.convert_from.set("mg of "+from_drug+" roughly equates to:")
def clicked_to(self, val):
sender = val.widget
idx = sender.curselection()
global to_drug
to_drug = sender.get(idx)
self.convert_to.set("mg of "+to_drug+".")
def kill(self):
self.quit()
root = tkinter.Tk()
transmute_benzodiazepines = TransmuteBenzodiazepines(root)
root.mainloop()
|
the-stack_106_16628
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AntMerchantExpandBenefitConfirmModel(object):
def __init__(self):
self._biz_ext = None
self._out_biz_time = None
self._record_id = None
self._user_id = None
@property
def biz_ext(self):
return self._biz_ext
@biz_ext.setter
def biz_ext(self, value):
self._biz_ext = value
@property
def out_biz_time(self):
return self._out_biz_time
@out_biz_time.setter
def out_biz_time(self, value):
self._out_biz_time = value
@property
def record_id(self):
return self._record_id
@record_id.setter
def record_id(self, value):
self._record_id = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.biz_ext:
if hasattr(self.biz_ext, 'to_alipay_dict'):
params['biz_ext'] = self.biz_ext.to_alipay_dict()
else:
params['biz_ext'] = self.biz_ext
if self.out_biz_time:
if hasattr(self.out_biz_time, 'to_alipay_dict'):
params['out_biz_time'] = self.out_biz_time.to_alipay_dict()
else:
params['out_biz_time'] = self.out_biz_time
if self.record_id:
if hasattr(self.record_id, 'to_alipay_dict'):
params['record_id'] = self.record_id.to_alipay_dict()
else:
params['record_id'] = self.record_id
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AntMerchantExpandBenefitConfirmModel()
if 'biz_ext' in d:
o.biz_ext = d['biz_ext']
if 'out_biz_time' in d:
o.out_biz_time = d['out_biz_time']
if 'record_id' in d:
o.record_id = d['record_id']
if 'user_id' in d:
o.user_id = d['user_id']
return o
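# Illustrative round-trip sketch (comment only, not part of the generated SDK
# file); the field values below are placeholders:
#   model = AntMerchantExpandBenefitConfirmModel()
#   model.record_id = "record-123"
#   model.out_biz_time = "2020-01-01 00:00:00"
#   params = model.to_alipay_dict()  # plain dict ready to be serialized for the API
#   restored = AntMerchantExpandBenefitConfirmModel.from_alipay_dict(params)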
|
the-stack_106_16629
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from itertools import groupby
from operator import itemgetter
import posixpath
from data_source import DataSource
from extensions_paths import JSON_TEMPLATES, PUBLIC_TEMPLATES
from future import Future
from platform_util import GetPlatforms
class WhatsNewDataSource(DataSource):
''' This class creates a list of "what is new" by chrome version.
'''
def __init__(self, server_instance, _):
self._parse_cache = server_instance.compiled_fs_factory.ForJson(
server_instance.host_file_system_provider.GetTrunk())
self._object_store = server_instance.object_store_creator.Create(
WhatsNewDataSource)
self._platform_bundle = server_instance.platform_bundle
def _GenerateChangesListWithVersion(self, platform, whats_new_json):
return [{
'id': change_id,
'type': change['type'],
'description': change['description'],
'version': change['version']
} for change_id, change in whats_new_json.iteritems()]
def _GetAPIVersion(self, platform, api_name):
version = None
category = self._platform_bundle.GetAPICategorizer(platform).GetCategory(
api_name)
if category == 'chrome':
channel_info = self._platform_bundle.GetAvailabilityFinder(
platform).GetAPIAvailability(api_name).channel_info
channel = channel_info.channel
if channel == 'stable':
version = channel_info.version
return version
def _GenerateAPIListWithVersion(self, platform):
data = []
for api_name, api_model in self._platform_bundle.GetAPIModels(
platform).IterModels():
version = self._GetAPIVersion(platform, api_name)
if version:
api = {
'name': api_name,
'description': api_model.description,
'version' : version,
'type': 'apis',
}
data.append(api)
data.sort(key=itemgetter('version'))
return data
def _GenerateWhatsNewDict(self):
whats_new_json_future = self._parse_cache.GetFromFile(
posixpath.join(JSON_TEMPLATES, 'whats_new.json'))
def _MakeDictByPlatform(platform):
whats_new_json = whats_new_json_future.Get()
platform_list = []
apis = self._GenerateAPIListWithVersion(platform)
apis.extend(self._GenerateChangesListWithVersion(platform,
whats_new_json))
apis.sort(key=itemgetter('version'), reverse=True)
for version, group in groupby(apis, key=itemgetter('version')):
whats_new_by_version = {
'version': version,
}
for item in group:
item_type = item['type']
if item_type not in whats_new_by_version:
whats_new_by_version[item_type] = []
whats_new_by_version[item_type].append(item)
platform_list.append(whats_new_by_version)
return platform_list
def resolve():
return dict((platform, _MakeDictByPlatform(platform))
for platform in GetPlatforms())
return Future(callback=resolve)
def _GetCachedWhatsNewData(self):
data = self._object_store.Get('whats_new_data').Get()
if data is None:
data = self._GenerateWhatsNewDict().Get()
self._object_store.Set('whats_new_data', data)
return data
def get(self, key):
return self._GetCachedWhatsNewData().get(key)
def Cron(self):
return self._GenerateWhatsNewDict()
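# Shape of the data produced above (comment only, inferred from the code):
# _GenerateWhatsNewDict returns a dict mapping each platform to a list like
#   [{'version': 33, 'apis': [{'name': ..., 'version': 33, 'type': 'apis', ...}],
#     <other item types from whats_new.json>: [...]},
#    {'version': 32, ...}]
# ordered from the newest stable version to the oldest.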
|
the-stack_106_16633
|
import re
from django.test import Client
from django.test import TestCase
from reviews.models import Publisher
class Exercise3Test(TestCase):
def test_fields_in_view(self):
""" "
Test that fields exist in the rendered template.
"""
c = Client()
response = c.get("/publishers/new/")
content = response.content.decode("ascii")
content = re.sub(r">\s+<", "><", content)
self.assertIsNotNone(
re.search(
r'<input type="hidden" name="csrfmiddlewaretoken" value="\w+">', content
)
)
self.assertIn(
'<label for="id_name">Name:</label><input type="text" name="name" maxlength="50" required id="id_name">'
'<span class="helptext">The name of the Publisher.</span></p>',
content,
)
self.assertIn(
'<label for="id_website">Website:</label><input type="url" name="website" maxlength="200" '
'required id="id_website"><span class="helptext">The Publisher's website.</span></p>',
content,
)
self.assertIn(
'<label for="id_email">Email:</label><input type="email" name="email" maxlength="254" '
'required id="id_email"><span class="helptext">The Publisher's email address.</span>',
content,
)
self.assertIn('<input type="submit" value="Submit">', content)
def test_publisher_create(self):
"""Test the creation of a new Publisher"""
self.assertEqual(Publisher.objects.all().count(), 0)
c = Client()
publisher_name = "Test Create Publisher"
publisher_website = "http://www.example.com/test-publisher/"
publisher_email = "[email protected]"
response = c.post(
"/publishers/new/",
{
"name": publisher_name,
"website": publisher_website,
"email": publisher_email,
},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Publisher.objects.all().count(), 1)
publisher = Publisher.objects.first()
self.assertEqual(publisher.name, publisher_name)
self.assertEqual(publisher.website, publisher_website)
self.assertEqual(publisher.email, publisher_email)
self.assertEqual(response["Location"], "/publishers/{}/".format(publisher.pk))
def test_publisher_no_create(self):
"""Test that no Publisher is created if the form is invalid."""
self.assertEqual(Publisher.objects.all().count(), 0)
c = Client()
response = c.post(
"/publishers/new/",
{"name": "", "website": "not a url", "email": "not an email"},
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Publisher.objects.all().count(), 0)
def test_publisher_edit(self):
"""
Test editing a publisher, the initial GET should have a form with values and then the post should update the
Publisher rather than creating a new one.
"""
publisher_name = "Test Edit Publisher"
publisher_website = "http://www.example.com/edit-publisher/"
publisher_email = "[email protected]"
publisher = Publisher(
name=publisher_name, website=publisher_website, email=publisher_email
)
publisher.save()
self.assertEqual(Publisher.objects.all().count(), 1)
c = Client()
response = c.get("/publishers/{}/".format(publisher.pk))
self.assertIn(b'value="Test Edit Publisher"', response.content)
self.assertIn(
b'value="http://www.example.com/edit-publisher/"', response.content
)
self.assertIn(b'value="[email protected]"', response.content)
response = c.post(
"/publishers/{}/".format(publisher.pk),
{
"name": "Updated Name",
"website": "https://www.example.com/updated/",
"email": "[email protected]",
},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Publisher.objects.all().count(), 1)
publisher2 = Publisher.objects.first()
self.assertEqual(publisher2.pk, publisher.pk)
self.assertEqual(publisher2.name, "Updated Name")
self.assertEqual(publisher2.website, "https://www.example.com/updated/")
self.assertEqual(publisher2.email, "[email protected]")
|
the-stack_106_16634
|
import argparse
import signal
from tqdm import tqdm
import catconv.operations as co
import catconv.stabi as sb
exit = False
def signal_handler(signal, frame):
    print('You pressed Ctrl+C!')
    global exit  # without this, the assignment below would only rebind a local name
    exit = True
# Register the handler so that Ctrl+C sets the flag checked inside the loop below.
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser()
parser.add_argument("source")
parser.add_argument("target")
parser.add_argument("-u", "--update", help="overwrite previous results",
action="store_true")
args = parser.parse_args()
source = sb.op.normpath(args.source)
target = sb.op.normpath(args.target)
data_dir, target_cat_name = sb.op.split(target)
pages = list(map(sb.page_from_path, sb.catalog_pages(source, ext=".tif")))  # list() so len(pages) works on Python 3
print("Source catalog:")
print("path:", source)
print("pages:", len(pages))
conversion = {"ext": ".jpg", "remove_type": True, "to_cat": data_dir,"cat": target_cat_name}
from_to = [(page, sb.convert_page_path(page, conversion)) for page in pages]
for ft in tqdm(from_to):
if exit:
break
from_page, to_page = ft
if sb.op.isfile(to_page['path']) and not args.update:
continue
else:
co.convert_to_png(*ft)
|
the-stack_106_16635
|
import copy
import logging
import sys
from collections import defaultdict
from typing import Dict
import methodtools
from timebudget import timebudget
from jyotisha.panchaanga.spatio_temporal import daily
from jyotisha.panchaanga.temporal import time, set_constants, ComputationSystem, AngaType
from jyotisha.panchaanga.temporal.festival import FestivalInstance
from jyotisha.panchaanga.temporal.festival.applier import tithi_festival, ecliptic, solar, vaara, rule_repo_based, \
FestivalAssigner
from jyotisha.panchaanga.temporal.time import Date
from jyotisha.panchaanga.temporal.tithi import ShraddhaTithiAssigner
from jyotisha.panchaanga.temporal.zodiac.angas import Tithi
from jyotisha.util import default_if_none
from sanskrit_data import collection_helper
from sanskrit_data.schema import common
timebudget.set_quiet(True) # don't show measurements as they happen
# timebudget.report_at_exit() # Generate report when the program exits
set_constants()
class Panchaanga(common.JsonObject):
"""This class enables the construction of a panchaanga for arbitrary periods, with festival_id_to_instance.
Generally, which days is a given festival associated with (esp pre-sunrise events)? We follow the same conventions as the adyatithi repo.
"""
LATEST_VERSION = "0.0.4"
def __init__(self, city, start_date, end_date, computation_system: ComputationSystem = None):
"""Constructor for the panchaanga.
"""
super(Panchaanga, self).__init__()
self.version = Panchaanga.LATEST_VERSION
self.city = city
self.start_date = Date(*([int(x) for x in start_date.split('-')])) if isinstance(start_date, str) else start_date
self.start_date.set_time_to_day_start()
self.end_date = Date(*([int(x) for x in end_date.split('-')])) if isinstance(end_date, str) else end_date
self.end_date.set_time_to_day_start()
self.computation_system = default_if_none(computation_system, ComputationSystem.DEFAULT)
self.jd_start = time.utc_gregorian_to_jd(self.start_date)
self.jd_end = time.utc_gregorian_to_jd(self.end_date)
self.duration = int(self.jd_end - self.jd_start) + 1
# For accurate festival assignment, we sometimes need panchaanga information about succeeding or preceding days.
    # For example, consider a festival to be celebrated during naxatra 27 in solar sidereal month 9. If naxatra 27 occurs twice in sidereal_solar_month 9 (gap of 27+ days), the latter occurrence is to be selected - the former day will not get a festival.
self.duration_posterior_padding = int(self.duration + 30)
self.duration_prior_padding = 2
self.weekday_start = time.get_weekday(self.jd_start)
self.festival_id_to_days = defaultdict(set, {})
self.compute_angas(compute_lagnas=self.computation_system.festival_options.lagnas)
if not self.computation_system.festival_options.no_fests:
self.update_festival_details()
@timebudget
def compute_angas(self, compute_lagnas=True):
"""Compute the entire panchaanga
"""
# INITIALISE VARIABLES
self.date_str_to_panchaanga: Dict[str, daily.DailyPanchaanga] = {}
#############################################################
# Compute all parameters -- sun/moon latitude/longitude etc #
#############################################################
for d in range(-self.duration_prior_padding, self.duration_posterior_padding - 1):
# The below block is temporary code to make the transition seamless.
date_d = time.jd_to_utc_gregorian(self.jd_start + d)
date_d.set_time_to_day_start()
previous_daily_panchaanga = self.date_str_to_panchaanga.get(date_d.offset_date(days=-1).get_date_str(), None)
daily_panchaanga = daily.DailyPanchaanga(city=self.city, date=date_d,
computation_system=self.computation_system,
previous_day_panchaanga=previous_daily_panchaanga)
if compute_lagnas:
daily_panchaanga.get_lagna_data()
self.date_str_to_panchaanga[date_d.get_date_str()] = daily_panchaanga
@methodtools.lru_cache(maxsize=10)
def daily_panchaangas_sorted(self, skip_padding_days=False):
if not skip_padding_days:
return sorted(self.date_str_to_panchaanga.values())
else:
full_list = sorted(self.date_str_to_panchaanga.values())
return [x for x in full_list if self.start_date <= x.date and x.date <= self.end_date]
def daily_panchaanga_for_jd(self, jd):
date = self.city.get_timezone_obj().julian_day_to_local_time(julian_day=jd)
return self.daily_panchaanga_for_date(date=date)
def daily_panchaanga_for_date(self, date):
return self.date_str_to_panchaanga.get(date.get_date_str(), None)
def pre_sunset_daily_panchaanga_for_jd(self, jd):
panchaanga = self.daily_panchaanga_for_jd(jd=jd)
if panchaanga is None:
return None
elif panchaanga.jd_sunset >= jd:
return panchaanga
else:
return self.daily_panchaanga_for_date(date=panchaanga.date + 1)
def post_sunrise_daily_panchaanga_for_jd(self, jd):
panchaanga = self.daily_panchaanga_for_jd(jd=jd)
if panchaanga is None:
return None
elif panchaanga.jd_sunrise <= jd:
return panchaanga
else:
return self.daily_panchaanga_for_date(date=panchaanga.date - 1)
def get_interval_anga_spans(self, date, interval_id, anga_type):
dp = self.daily_panchaanga_for_date(date)
(anga_spans, _) = dp.get_interval_anga_spans(interval_id=interval_id, anga_type=anga_type)
anga_spans = copy.deepcopy(anga_spans)
if anga_type == AngaType.TITHI:
for span in anga_spans:
if span.anga.index in (1, 2):
# The below is necessary because tithi 1 or 2 may start after sunrise.
dp_next = self.daily_panchaanga_for_date(date + 1)
# Lunar month below may be incorrect (adhika mAsa complication) if dp_next is not available (eg when the next day is beyond this panchaanga duration). Downstream code should be aware of that case.
month = dp_next.lunar_month_sunrise if dp_next is not None else dp.lunar_month_sunrise + 1
span.anga = Tithi.from_anga(anga=span.anga, month=month)
else:
span.anga = Tithi.from_anga(anga=span.anga, month=dp.lunar_month_sunrise)
return anga_spans
def clear_padding_day_festivals(self):
"""Festival assignments for padding days are not trustworthy - since one would need to look-ahead or before into further days for accurate festival assignment. They were computed only to ensure accurate computation of the core days in this panchaanga. To avoid misleading, we ought to clear festivals provisionally assigned to the padding days."""
daily_panchaangas = self.daily_panchaangas_sorted()
for dp in daily_panchaangas[:self.duration_prior_padding]:
self.delete_festivals_on_date(date=dp.date)
for dp in daily_panchaangas[self.duration_prior_padding + self.duration:]:
self.delete_festivals_on_date(date=dp.date)
@timebudget
def update_festival_details(self):
"""
Festival data may be updated more frequently and a precomputed panchaanga may go out of sync. Hence we keep this method separate.
:return:
"""
self._reset_festivals()
rule_lookup_assigner = rule_repo_based.RuleLookupAssigner(panchaanga=self)
rule_lookup_assigner.apply_festival_from_rules_repos()
ShraddhaTithiAssigner(panchaanga=self).assign_shraaddha_tithi()
ecliptic.EclipticFestivalAssigner(panchaanga=self).assign_all()
tithi_festival.TithiFestivalAssigner(panchaanga=self).assign_all()
solar.SolarFestivalAssigner(panchaanga=self).assign_all()
vaara.VaraFestivalAssigner(panchaanga=self).assign_all()
generic_assigner = FestivalAssigner(panchaanga=self)
generic_assigner.cleanup_festivals()
rule_lookup_assigner.assign_relative_festivals()
# self._sync_festivals_dict_and_daily_festivals(here_to_daily=True, daily_to_here=True)
generic_assigner.assign_festival_numbers()
self.clear_padding_day_festivals()
def _sync_festivals_dict_and_daily_festivals(self, here_to_daily=False, daily_to_here=True):
if here_to_daily:
for festival_id, days in self.festival_id_to_days.items():
for fest_day in days:
if not isinstance(fest_day, Date):
logging.fatal(festival_id + " " + str(days))
fest_day_str = fest_day.get_date_str()
if fest_day_str in self.date_str_to_panchaanga and festival_id not in self.date_str_to_panchaanga[fest_day_str].festival_id_to_instance:
self.date_str_to_panchaanga[fest_day_str].festival_id_to_instance[festival_id] = FestivalInstance(name=festival_id)
if daily_to_here:
for dp in self.date_str_to_panchaanga.values():
for fest in dp.festival_id_to_instance.values():
days = self.festival_id_to_days.get(fest.name, set())
if dp.date not in days:
days.add(dp.date)
self.festival_id_to_days[fest.name] = days
def _reset_festivals(self):
self.festival_id_to_days = defaultdict(set, {})
for daily_panchaanga in self.date_str_to_panchaanga.values():
daily_panchaanga.festival_id_to_instance = {}
def delete_festival(self, fest_id):
for date in self.festival_id_to_days.pop(fest_id, []):
self.date_str_to_panchaanga[date.get_date_str()].festival_id_to_instance.pop(fest_id, None)
def add_festival(self, fest_id, date, interval_id="full_day"):
if date.get_date_str() not in self.date_str_to_panchaanga:
return
interval = self.date_str_to_panchaanga[date.get_date_str()].get_interval(interval_id=interval_id)
self.add_festival_instance(date=date, festival_instance=FestivalInstance(name=fest_id, interval=interval))
def add_festival_instance(self, festival_instance, date):
p_fday = self.date_str_to_panchaanga.get(date.get_date_str(), None)
if p_fday is not None:
p_fday.festival_id_to_instance[festival_instance.name] = festival_instance
self.festival_id_to_days[festival_instance.name].add(date)
def delete_festival_date(self, fest_id, date):
self.festival_id_to_days[fest_id].discard(date)
if len(self.festival_id_to_days[fest_id]) == 0:
# Avoid empty items (when serializing).
self.delete_festival(fest_id=fest_id)
self.date_str_to_panchaanga[date.get_date_str()].festival_id_to_instance.pop(fest_id, None)
def delete_festivals_on_date(self, date):
# Reason for casting to list below: Avoid RuntimeError: dictionary changed size during iteration
dp = self.date_str_to_panchaanga[date.get_date_str()]
for fest_id in list(dp.festival_id_to_instance.keys()):
self.delete_festival_date(fest_id=fest_id, date=dp.date)
def _refill_daily_panchaangas(self):
"""Avoids duplication for memory efficiency.
Inverse of _force_non_redundancy_in_daily_panchaangas
"""
for daily_panchaanga in self.date_str_to_panchaanga.values():
daily_panchaanga.city = self.city
daily_panchaanga.computation_system = self.computation_system
def _force_non_redundancy_in_daily_panchaangas(self):
"""Avoids duplication for memory efficiency."""
for daily_panchaanga in self.date_str_to_panchaanga.values():
daily_panchaanga.city = None
daily_panchaanga.computation_system = None
def post_load_ops(self):
self._refill_daily_panchaangas()
self.festival_id_to_days = collection_helper.lists_to_sets(self.festival_id_to_days)
@timebudget
def dump_to_file(self, filename, floating_point_precision=None, sort_keys=True):
self._force_non_redundancy_in_daily_panchaangas()
self.festival_id_to_days = collection_helper.sets_to_lists(self.festival_id_to_days)
super(Panchaanga, self).dump_to_file(filename=filename, floating_point_precision=floating_point_precision,
sort_keys=sort_keys)
self.festival_id_to_days = collection_helper.lists_to_sets(self.festival_id_to_days)
self._refill_daily_panchaangas()
# Essential for depickling to work.
common.update_json_class_index(sys.modules[__name__])
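# Illustrative usage sketch (comment only, not part of the original module). The
# City import path and coordinates are assumptions for demonstration:
#   from jyotisha.panchaanga.spatio_temporal import City
#   city = City('Chennai', '13:05:24', '80:16:12', 'Asia/Calcutta')
#   panchaanga = Panchaanga(city=city, start_date='2021-01-01', end_date='2021-01-31')
#   dp = panchaanga.daily_panchaanga_for_date(Date(2021, 1, 14))
#   print(sorted(dp.festival_id_to_instance.keys()))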
|