the-stack_106_29343 | import warnings, numpy as np
from keras import backend as K
class IncreaseBSOnPlateau:
"""Increase batch size when a metric has stopped improving.
Models often benefit from increasing the batch size by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and, if no improvement is seen for a 'patience' number
of epochs, the batch size is increased until 'max_bs' is reached,
after which the learning rate is reduced instead.
# Example
```python
increase_bs = IncreaseBSOnPlateau(model, monitor='val_loss', factor_bs=2,
patience=5, max_bs=1024)
```
# Arguments
model: the Keras model being trained.
monitor: quantity to be monitored.
factor_bs: factor by which the batch size will be
increased. new_bs = bs * factor_bs; once max_bs is
reached, the learning rate is reduced by 1 / factor_bs instead.
patience: number of epochs with no improvement
after which the batch size will be increased.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode,
the batch size will be increased when the quantity
monitored has stopped decreasing; in `max`
mode it will be increased when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
epsilon: threshold for measuring the new optimum,
to only focus on significant changes.
cooldown: number of epochs to wait before resuming
normal operation after the batch size or learning rate has changed.
max_bs: upper bound on the batch size.
min_lr: lower bound on the learning rate.
"""
def __init__(self, model, monitor='val_loss', factor_bs=2,
patience=10, verbose=0, mode='auto', epsilon=1e-4, cooldown=0,
max_bs=None, min_lr=0):
#super(IncreaseBSOnPlateau, self).__init__()
self.monitor = monitor
if factor_bs <= 1.0:
raise ValueError('IncreaseBSOnPlateau '
'does not support a factor_bs <= 1.0.')
self.model = model
self.factor_bs = float(factor_bs)
self.factor_lr = 1.0 / self.factor_bs
if max_bs is None:  # XXX
raise ValueError('IncreaseBSOnPlateau '
'does not support a max_bs of None. '
'Implementation note: need to find how to derive a default, '
'e.g. model.history.params["samples"] / 10')
else:
self.max_bs = max_bs
self.min_lr = min_lr
self.epsilon = epsilon
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
warnings.warn('Batch Size Plateau Increasing mode %s is unknown, '
'fallback to auto mode.' % (self.mode),
RuntimeWarning)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
self.lr_epsilon = self.min_lr * 1e-4
def on_train_begin(self, logs=None):
self._reset()
def update_model(self, model):
self.model = model
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['bs'] = self.model.history.params.get('batch_size',None) #XXX: will break if .fit_generator
logs['lr'] = K.get_value(self.model.optimizer.lr)
#current = logs.get(self.monitor)
current = self.model.history.history[self.monitor][-1]
print("current: {}".format(current))
if current is None:
warnings.warn(
'Increase BS on plateau conditioned on metric `%s` '
'which is not available. Available metrics are: %s' %
(self.monitor, ','.join(list(logs.keys()))), RuntimeWarning
)
else: #AAF: current is a valid metric
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
new_bs = self.model.history.params.get('batch_size',None)
new_lr = K.get_value(self.model.optimizer.lr)
if self.monitor_op(current, self.best): #AAF: self.wait is the # of epoch since best results
self.best = current
self.wait = 0
new_bs = self.model.history.params.get('batch_size',None)
new_lr = K.get_value(self.model.optimizer.lr)
elif not self.in_cooldown(): # current is not best
if self.wait >= self.patience: # DO BS and LR calculation
old_bs = int(self.model.history.params.get('batch_size',None))
old_lr = float(K.get_value(self.model.optimizer.lr))
if ((old_bs * self.factor_bs) > self.max_bs) and (old_bs < self.max_bs): # Can't increase BS by full margin
# g = lr * Samples / BS (Yi et al. 2017) g is the SGD noise
sample_size = int(self.model.history.params.get('samples',None))
new_bs = int(old_bs * self.factor_bs)
new_bs = min(new_bs, self.max_bs)
old_g = old_lr * sample_size / old_bs
new_g = old_g / self.factor_bs
new_lr = new_g * new_bs / self.model.history.params.get('samples')
if self.verbose > 0:
print('\nEpoch %05d: increasing batch size to %s and reducing lr to %s.' % (epoch + 1, new_bs, new_lr))
elif old_bs < self.max_bs: # Full increase in BS
new_bs = int(old_bs * self.factor_bs)
new_bs = min(new_bs, self.max_bs)
new_lr = old_lr
if self.verbose > 0:
print('\nEpoch %05d: increasing batch size to %s.' % (epoch + 1, new_bs))
self.cooldown_counter = self.cooldown
self.wait = 0
elif old_lr > self.min_lr + self.lr_epsilon: # Reducing LR instead of increasing BS
new_lr = old_lr * self.factor_lr
new_lr = max(new_lr, self.min_lr)
new_bs = old_bs
#K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: reducing learning rate to %s.' % (epoch + 1, new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
elif old_lr <= self.min_lr + self.lr_epsilon: # lr == min_lr
new_bs = old_bs
new_lr = old_lr
else: # set bs and lr value while waiting
new_bs = self.model.history.params.get('batch_size',None)
new_lr = K.get_value(self.model.optimizer.lr)
self.wait += 1
return new_bs, new_lr
def in_cooldown(self):
return self.cooldown_counter > 0
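# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). Because this callback
# does not subclass keras.callbacks.Callback and on_epoch_end() *returns*
# (new_bs, new_lr) instead of applying them, the caller is expected to drive
# training one epoch at a time and re-apply both values itself. `model`,
# `x_train` and `y_train` below are illustrative assumptions.
#
# The rescaling follows the noise relation used in the comment above,
# g = lr * samples / bs: when the batch size can no longer grow by the full
# factor, the learning rate is shrunk so that g is still divided by factor_bs,
# i.e. new_lr = (g / factor_bs) * new_bs / samples.
#
# increaser = IncreaseBSOnPlateau(model, monitor='val_loss', factor_bs=2,
#                                 patience=5, max_bs=1024, verbose=1)
# bs = 32
# for epoch in range(100):
#     model.fit(x_train, y_train, batch_size=bs, epochs=1,
#               validation_split=0.1, verbose=0)
#     increaser.update_model(model)            # keep the callback's model reference fresh
#     bs, lr = increaser.on_epoch_end(epoch)   # decide next epoch's batch size / lr
#     K.set_value(model.optimizer.lr, lr)      # apply the (possibly reduced) learning rate
# ---------------------------------------------------------------------------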
|
the-stack_106_29345 | import os
from setuptools import setup
def get_version():
v = "0.0.0"
return v
setup(
name = "hivevo",
version = get_version(),
author = "Fabio Zanini and Richard Neher",
author_email = "[email protected]",
description = ("HIVevo access"),
long_description = "This is a simple collection of classes that provide a python interface to precomputed data from the HIVEVO project",
long_description_content_type="text/markdown",
license = "MIT",
keywords = "",
url = "https://github.com/neherlab/HIVEVO_access",
packages=['hivevo'],
install_requires = [
'biopython>=1.67',
'numpy>=1.10.4',
'pandas>=0.17.1',
'scipy>=0.16.1'
],
extras_require = {
':python_version >= "3.6"':['matplotlib>=2.0'],
},
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8"
]
)
|
the-stack_106_29347 |
import pandas as pd
import numpy as np
import pickle
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
nSamples = 400
trainSamples = int( 0.7*nSamples)
testSamples = int(0.1*nSamples)
valSamples = int(0.2*nSamples)
refinement = 1
lenGrid = 300
inputShape = 300
def getLoadData():
sideLoadInp = np.zeros((nSamples, lenGrid, lenGrid))
upLoadInp = np.zeros((nSamples, lenGrid, lenGrid))
lowLoadInp = np.zeros((nSamples, lenGrid, lenGrid))
load = pd.read_csv('Data_CSV/load.csv', delimiter="\t")
print(load.columns)
sideLoad = load[' Side Load '].to_numpy()
upperLoad = load[' Upper Load '].to_numpy()
lowerLoad = load[' Lower Load '].to_numpy()
for i in range(0, nSamples):
sideLoadInp[i][0][:] = sideLoad[i]
upLoadInp[i][0][:] = upperLoad[i]
lowLoadInp[i][0][:] = lowerLoad[i]
return sideLoadInp, upLoadInp, lowLoadInp
def SplitData ():
maskedIndexTotal_shfld =[]
maskedIndexFile = open('maskedIndexTotal.pkl', 'rb')
inputData = np.load('inputData.npy')
outputData = np.load('outputData.npy')
meanStdStress = np.load('meanStdStress.npy', allow_pickle = True)
maskedIndexTotal = pickle.load(maskedIndexFile)
maskedIndexFile.close()
sideLoadInp, upLoadInp, lowLoadInp = getLoadData()
print(inputData.shape, outputData.shape, sideLoadInp.shape)
# for i in range(0, nSamples):
# print(str(len(maskedIndexTotal[i][0]))+"\t"+str(len(maskedIndexTotal[i][0]))+"\n")
randIndices = np.arange(0, nSamples)
np.random.shuffle(randIndices)
inputData = inputData[randIndices][:][:]
outputData = outputData[randIndices][:][:]
sideLoadInp = sideLoadInp[randIndices][:][:]
upLoadInp = upLoadInp[randIndices][:][:]
lowLoadInp = lowLoadInp[randIndices][:][:]
meanStdStress_shuffled = meanStdStress[randIndices][:][:]
for i in randIndices:
maskedIndexTotal_shfld.append(maskedIndexTotal[i])
trainInput1 = inputData[0:trainSamples][:][:]
trainOutput = outputData[0:trainSamples][:][:]
valInput1 = inputData[trainSamples:valSamples+trainSamples][:][:]
valOutput = outputData[trainSamples:valSamples+trainSamples][:][:]
testInput1 = inputData[valSamples+trainSamples: valSamples+trainSamples+testSamples][:][:]
testOutput = outputData[valSamples+trainSamples: valSamples+trainSamples+testSamples][:][:]
trainInput2_1 = sideLoadInp[0:trainSamples][:][:]
trainInput2_2 = upLoadInp[0:trainSamples][:][:]
trainInput2_3 = lowLoadInp[0:trainSamples][:][:]
valInput2_1 = sideLoadInp[trainSamples:valSamples + trainSamples][:][:]
valInput2_2 = upLoadInp[trainSamples:valSamples + trainSamples][:][:]
valInput2_3 = lowLoadInp[trainSamples:valSamples + trainSamples][:][:]
testInput2_1 = sideLoadInp[valSamples + trainSamples: valSamples + trainSamples + testSamples][:][:]
testInput2_2 = upLoadInp[valSamples + trainSamples: valSamples + trainSamples + testSamples][:][:]
testInput2_3 = lowLoadInp[valSamples + trainSamples: valSamples + trainSamples + testSamples][:][:]
return trainInput1,trainInput2_1, trainInput2_2, trainInput2_3, trainOutput, valInput1, valInput2_1, valInput2_2, valInput2_3, valOutput, testInput1,testInput2_1, testInput2_2, testInput2_3, testOutput, maskedIndexTotal_shfld, meanStdStress_shuffled
trainInput1,trainInput2_1, trainInput2_2, trainInput2_3, trainOutput, valInput1, valInput2_1, valInput2_2, valInput2_3, valOutput, testInput1,testInput2_1, testInput2_2, testInput2_3, testOutput, maskedIndexTotal_shfld, meanStdStress_shuffled = SplitData()
# print(testInput2.shape)
maskedIndexFile = open('maskedIndexTotal_suffled.pkl', 'wb')
pickle.dump(maskedIndexTotal_shfld, maskedIndexFile)
maskedIndexFile.close()
np.save('trainInput1.npy', trainInput1)
np.save('trainInput2_1.npy', trainInput2_1)
np.save('trainInput2_2.npy', trainInput2_2)
np.save('trainInput2_3.npy', trainInput2_3)
np.save('trainOutput.npy', trainOutput)
np.save('testInput1.npy', testInput1)
np.save('testInput2_1.npy', testInput2_1)
np.save('testInput2_2.npy', testInput2_2)
np.save('testInput2_3.npy', testInput2_3)
np.save('testOutput.npy', testOutput)
np.save('valInput1.npy', valInput1)
np.save('valInput2_1.npy', valInput2_1)
np.save('valInput2_2.npy', valInput2_2)
np.save('valInput2_3.npy', valInput2_3)
np.save('valOutput.npy', valOutput)
np.save('meanStdStress_shuffled.npy', meanStdStress_shuffled)
|
the-stack_106_29350 | r"""
Bijection between rigged configurations for `B(\infty)` and marginally large tableaux
AUTHORS:
- Travis Scrimshaw (2015-07-01): Initial version
REFERENCES:
.. [RC-MLT] Ben Salisbury and Travis Scrimshaw. *Connecting marginally
large tableaux and rigged configurations via crystals*.
Preprint. :arxiv:`1505.07040`.
"""
#*****************************************************************************
# Copyright (C) 2015 Travis Scrimshaw <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.combinat.rigged_configurations.rigged_configurations import RiggedConfigurations
from sage.combinat.rigged_configurations.bij_type_B import (KRTToRCBijectionTypeB,
RCToKRTBijectionTypeB)
from sage.combinat.rigged_configurations.bij_type_D import (KRTToRCBijectionTypeD,
RCToKRTBijectionTypeD)
from sage.combinat.rigged_configurations.bij_type_A import (KRTToRCBijectionTypeA,
RCToKRTBijectionTypeA)
from sage.combinat.rigged_configurations.bij_type_C import (KRTToRCBijectionTypeC,
RCToKRTBijectionTypeC)
from sage.combinat.rigged_configurations.tensor_product_kr_tableaux import TensorProductOfKirillovReshetikhinTableaux
from sage.combinat.crystals.letters import CrystalOfLetters
from sage.combinat.root_system.cartan_type import CartanType
from sage.categories.morphism import Morphism
from sage.categories.homset import Hom
from sage.misc.flatten import flatten
class FromTableauIsomorphism(Morphism):
r"""
Crystal isomorphism of `B(\infty)` in the tableau model to the
rigged configuration model.
"""
def _repr_type(self):
r"""
Return the type of morphism of ``self``.
EXAMPLES::
sage: RC = crystals.infinity.RiggedConfigurations(['A',3])
sage: T = crystals.infinity.Tableaux(['A',3])
sage: phi = RC.coerce_map_from(T)
sage: phi._repr_type()
'Crystal Isomorphism'
"""
return "Crystal Isomorphism"
def __invert__(self):
r"""
Return the inverse of ``self``.
EXAMPLES::
sage: RC = crystals.infinity.RiggedConfigurations(['A',3])
sage: T = crystals.infinity.Tableaux(['A',3])
sage: phi = RC.coerce_map_from(T)
sage: ~phi
Crystal Isomorphism morphism:
From: The infinity crystal of rigged configurations of type ['A', 3]
To: The infinity crystal of tableaux of type ['A', 3]
"""
return FromRCIsomorphism(Hom(self.codomain(), self.domain()))
def _call_(self, x):
r"""
Return the image of ``x`` in the rigged configuration model
of `B(\infty)`.
EXAMPLES::
sage: RC = crystals.infinity.RiggedConfigurations(['A',3])
sage: T = crystals.infinity.Tableaux(['A',3])
sage: phi = RC.coerce_map_from(T)
sage: x = T.an_element().f_string([2,2,1,1,3,2,1,2,1,3])
sage: y = phi(x); ascii_art(y)
-4[ ][ ][ ][ ]-2 -3[ ][ ][ ]-1 -1[ ][ ]-1
-2[ ]-1
sage: (~phi)(y) == x
True
"""
conj = x.to_tableau().conjugate()
ct = self.domain().cartan_type()
act = ct.affine()
TP = TensorProductOfKirillovReshetikhinTableaux(act, [[r,1] for r in conj.shape()])
elt = TP(pathlist=[reversed(row) for row in conj])
if ct.type() == 'A':
bij = KRTToRCBijectionTypeA(elt)
elif ct.type() == 'B':
bij = MLTToRCBijectionTypeB(elt)
elif ct.type() == 'C':
bij = KRTToRCBijectionTypeC(elt)
elif ct.type() == 'D':
bij = MLTToRCBijectionTypeD(elt)
else:
raise NotImplementedError("bijection of type {} not yet implemented".format(ct))
return self.codomain()(bij.run())
class FromRCIsomorphism(Morphism):
r"""
Crystal isomorphism of `B(\infty)` in the rigged configuration model
to the tableau model.
"""
def _repr_type(self):
r"""
Return the type of morphism of ``self``.
EXAMPLES::
sage: T = crystals.infinity.Tableaux(['A',3])
sage: RC = crystals.infinity.RiggedConfigurations(['A',3])
sage: phi = T.coerce_map_from(RC)
sage: phi._repr_type()
'Crystal Isomorphism'
"""
return "Crystal Isomorphism"
def __invert__(self):
r"""
Return the inverse of ``self``.
EXAMPLES::
sage: T = crystals.infinity.Tableaux(['A',3])
sage: RC = crystals.infinity.RiggedConfigurations(['A',3])
sage: phi = T.coerce_map_from(RC)
sage: ~phi
Crystal Isomorphism morphism:
From: The infinity crystal of tableaux of type ['A', 3]
To: The infinity crystal of rigged configurations of type ['A', 3]
"""
return FromTableauIsomorphism(Hom(self.codomain(), self.domain()))
def _call_(self, x):
r"""
Return the image of ``x`` in the tableau model of `B(\infty)`.
EXAMPLES::
sage: T = crystals.infinity.Tableaux(['A',3])
sage: RC = crystals.infinity.RiggedConfigurations(['A',3])
sage: phi = T.coerce_map_from(RC)
sage: x = RC.an_element().f_string([2,2,1,1,3,2,1,2,1,3])
sage: y = phi(x); y.pp()
1 1 1 1 1 2 2 3 4
2 2 3 4
3
sage: (~phi)(y) == x
True
"""
lam = [sum(nu)+1 for nu in x]
ct = self.domain().cartan_type()
I = ct.index_set()
if ct.type() == 'D':
lam[-2] = max(lam[-2], lam[-1])
lam.pop()
l = sum([ [[r+1,1]]*v for r,v in enumerate(lam[:-1]) ], [])
n = len(I)
l = l + sum([ [[n,1], [n-1,1]] for k in range(lam[-1])], [])
else:
if ct.type() == 'B':
lam[-1] *= 2
l = sum([ [[r,1]]*lam[i] for i,r in enumerate(I) ], [])
RC = RiggedConfigurations(ct.affine(), reversed(l))
elt = RC(x)
if ct.type() == 'A':
bij = RCToKRTBijectionTypeA(elt)
elif ct.type() == 'B':
bij = RCToMLTBijectionTypeB(elt)
elif ct.type() == 'C':
bij = RCToKRTBijectionTypeC(elt)
elif ct.type() == 'D':
bij = RCToMLTBijectionTypeD(elt)
else:
raise NotImplementedError("bijection of type {} not yet implemented".format(ct))
y = bij.run()
# Now make the result marginally large
y = [list(c) for c in y]
cur = []
L = CrystalOfLetters(ct)
for i in I:
cur.insert(0, L(i))
c = y.count(cur)
while c > 1:
y.remove(cur)
c -= 1
return self.codomain()(*flatten(y))
class MLTToRCBijectionTypeB(KRTToRCBijectionTypeB):
def run(self):
r"""
Run the bijection from a marginally large tableaux to a rigged
configuration.
EXAMPLES::
sage: vct = CartanType(['B',4]).as_folding()
sage: RC = crystals.infinity.RiggedConfigurations(vct)
sage: T = crystals.infinity.Tableaux(['B',4])
sage: Psi = T.crystal_morphism({T.module_generators[0]: RC.module_generators[0]})
sage: TS = [x.value for x in T.subcrystal(max_depth=4)]
sage: all(Psi(b) == RC(b) for b in TS) # long time # indirect doctest
True
"""
for cur_crystal in reversed(self.tp_krt):
cur_column = list(cur_crystal)
self.cur_path.insert(0, []) # Prepend an empty list
self.cur_dims.insert(0, [0, 1])
for letter in reversed(cur_column):
self.cur_dims[0][0] += 1
val = letter.value # Convert from a CrystalOfLetter to an Integer
# Build the next state
self.cur_path[0].insert(0, [letter]) # Prepend the value
if self.cur_dims[0][0] == self.n:
# Spinor case, we go from \Lambda_{n-1} -> 2\Lambda_n
self.cur_dims.insert(1, [self.n,1])
self.cur_path.insert(1, self.cur_path[0])
self.next_state(val)
self.ret_rig_con.set_immutable() # Return it to immutable
return self.ret_rig_con
class RCToMLTBijectionTypeB(RCToKRTBijectionTypeB):
def run(self):
r"""
Run the bijection from rigged configurations to a marginally large
tableau.
EXAMPLES::
sage: vct = CartanType(['B',4]).as_folding()
sage: RC = crystals.infinity.RiggedConfigurations(vct)
sage: T = crystals.infinity.Tableaux(['B',4])
sage: Psi = RC.crystal_morphism({RC.module_generators[0]: T.module_generators[0]})
sage: RCS = [x.value for x in RC.subcrystal(max_depth=4)]
sage: all(Psi(nu) == T(nu) for nu in RCS) # long time # indirect doctest
True
"""
letters = CrystalOfLetters(self.rigged_con.parent()._cartan_type.classical())
ret_crystal_path = []
while self.cur_dims:
dim = self.cur_dims[0]
ret_crystal_path.append([])
# Assumption: all factors are single columns
if dim[0] == self.n:
# Spinor case, since we've done 2\Lambda_n -> \Lambda_{n-1}
self.cur_dims.pop(1)
while dim[0] > 0:
dim[0] -= 1 # This takes care of the indexing
b = self.next_state(dim[0])
# Make sure we have a crystal letter
ret_crystal_path[-1].append(letters(b)) # Append the rank
self.cur_dims.pop(0) # Pop off the leading column
return ret_crystal_path
class MLTToRCBijectionTypeD(KRTToRCBijectionTypeD):
def run(self):
r"""
Run the bijection from a marginally large tableaux to a rigged
configuration.
EXAMPLES::
sage: RC = crystals.infinity.RiggedConfigurations(['D',4])
sage: T = crystals.infinity.Tableaux(['D',4])
sage: Psi = T.crystal_morphism({T.module_generators[0]: RC.module_generators[0]})
sage: TS = [x.value for x in T.subcrystal(max_depth=4)]
sage: all(Psi(b) == RC(b) for b in TS) # long time # indirect doctest
True
"""
for cur_crystal in reversed(self.tp_krt):
# Iterate through the columns
cur_column = list(cur_crystal)
self.cur_path.insert(0, []) # Prepend an empty list
self.cur_dims.insert(0, [0, 1])
for letter in reversed(cur_column):
self.cur_dims[0][0] += 1
val = letter.value # Convert from a CrystalOfLetter to an Integer
# Build the next state
self.cur_path[0].insert(0, [letter]) # Prepend the value
self.next_state(val)
if self.cur_dims[0][0] == self.n - 1:
# Spinor case, we go from \Lambda_{n-2} -> \Lambda_{n-1} + \Lambda_n
self.cur_dims.insert(1, [self.n,1])
self.cur_path.insert(1, self.cur_path[0] + [None])
self.ret_rig_con.set_immutable() # Return it to immutable
return self.ret_rig_con
class RCToMLTBijectionTypeD(RCToKRTBijectionTypeD):
def run(self):
r"""
Run the bijection from rigged configurations to a marginally large
tableau.
EXAMPLES::
sage: RC = crystals.infinity.RiggedConfigurations(['D',4])
sage: T = crystals.infinity.Tableaux(['D',4])
sage: Psi = RC.crystal_morphism({RC.module_generators[0]: T.module_generators[0]})
sage: RCS = [x.value for x in RC.subcrystal(max_depth=4)]
sage: all(Psi(nu) == T(nu) for nu in RCS) # long time # indirect doctest
True
"""
letters = CrystalOfLetters(self.rigged_con.parent()._cartan_type.classical())
ret_crystal_path = []
while self.cur_dims:
dim = self.cur_dims[0]
ret_crystal_path.append([])
# Assumption: all factors are single columns
if dim[0] == self.n - 1:
# Spinor case, since we've done \Lambda_n + \Lambda_{n-1} -> \Lambda_{n-2}
self.cur_dims.pop(1)
while dim[0] > 0:
dim[0] -= 1 # This takes care of the indexing
b = self.next_state(dim[0])
# Make sure we have a crystal letter
ret_crystal_path[-1].append(letters(b)) # Append the rank
self.cur_dims.pop(0) # Pop off the leading column
return ret_crystal_path
|
the-stack_106_29351 | from threading import Timer
import time
class Device:
def __init__(self, config, callback):
self.is_switch = False
self.input_outputs = {}
self.callback = callback
self.config = config
self.board = self.config["board_type"]
def read_value(self, callback=None):
data = {}
success = False
while not success:
try:
values = self.read_value_imp()
data = {"sub_topic":"","msg":{"id": self.config["id"], "custom_id": self.config["custom_id"], "timestamp":int(time.time()), "values": values}}
success = True
except:
print("could not read from sensor (id: {})".format(self.config["custom_id"]))
time.sleep(5)
callback(data) if callback else self.callback(data)
def read_value_loop(self, interval = None, callback=None):
if not interval:
interval = self.config["interval"]
self.read_value(callback)
t = Timer(interval, self.read_value_loop, [interval, callback])
t.start()
def init_input_outputs(self, decide_io):
for io in self.config["input_output"]:
name = io["name"]
io_constructor = decide_io(name)
self.input_outputs[name] = io_constructor(io)
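if __name__ == "__main__":
    # Editor's usage sketch (not part of the original file). Device expects a
    # subclass to supply read_value_imp(); the sensor class and the config
    # values below are illustrative assumptions, not part of the real project.
    class FakeTemperatureSensor(Device):
        def read_value_imp(self):
            return {"temperature": 21.5}

    config = {"board_type": "raspberrypi", "id": 1, "custom_id": "temp-1",
              "interval": 60, "input_output": []}
    sensor = FakeTemperatureSensor(config, callback=print)
    sensor.read_value()        # one reading, forwarded to the callback
    # sensor.read_value_loop() # or poll every config["interval"] seconds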
|
the-stack_106_29352 | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""
This script will prepare a release by:
- Generating the JSON and MO files for all language packages from PO files
- Update the contributors list
- Bumping the version
"""
# Standard library imports
import argparse
import configparser
import hashlib
import os
import re
import subprocess
import traceback
from pathlib import Path
from typing import List, Optional
# Third party imports
import polib
from jupyterlab_translate import api
from contributors import get_contributors_report
# Constants
HERE = Path(__file__).parent.resolve()
REPO_ROOT = HERE.parent
LANG_PACKS_PATH = REPO_ROOT / "language-packs"
JLAB_LOCALE_PATH = REPO_ROOT / "jupyterlab" / "locale"
JLAB_EXT_PATH = REPO_ROOT / "extensions"
LOCALE_FOLDER = "locale"
LC_MESSAGES_FOLDER = "LC_MESSAGES"
BUMP_CONFIG = ".bumpversion.cfg"
CONTRIBUTORS = "CONTRIBUTORS.md"
VERSION_REGEX = re.compile(r"\d+\.\d+\.post\d+")
def load_hash(package_dir: Path) -> str:
"""Read bump2version hash from package."""
hash_value = None
config_path = package_dir / BUMP_CONFIG
if config_path.is_file():
config = configparser.ConfigParser()
config.read(config_path)
hash_value = config.get("hash", "value", fallback=None)
if not hash_value:
hash_value = None
return hash_value
def save_hash(package_dir: Path, hash_value: str) -> None:
"""Save the new hash value in bump2version configuration file."""
config_path = package_dir / BUMP_CONFIG
config = configparser.ConfigParser()
if config_path.is_file():
config.read(config_path)
if not config.has_section("hash"):
config.add_section("hash")
config["hash"]["value"] = hash_value
with open(config_path, "w") as fh:
config.write(fh)
def create_hash(*files: Path) -> str:
"""Compute the hash value of file paths."""
hasher = hashlib.sha256()
for f in files:
hasher.update(f.read_bytes())
return hasher.hexdigest()
def is_updated_translation(po_file_paths: List[Path], package_dir: Path) -> bool:
"""Are the translations updated?
Notes:
If the package directory does not exist, it will be created using the
language pack cookiecutter template.
Args:
po_file_paths: translations PO files
package_dir: Package directory containing the PO file
Returns:
Whether the translations have been updated or not
"""
old_hash = load_hash(package_dir)
new_hash = create_hash(*po_file_paths)
if old_hash is None:
return True
else:
return old_hash != new_hash
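# Editor's sketch (not part of the original script): the three hash helpers above
# are meant to be used together, mirroring what prepare_jupyterlab_lp_release()
# does below (the locale and paths here are illustrative).
#
#     pkg = LANG_PACKS_PATH / "jupyterlab-language-pack-fr-FR"
#     po_files = [JLAB_LOCALE_PATH / "fr_FR" / LC_MESSAGES_FOLDER / "jupyterlab.po"]
#     if is_updated_translation(po_files, pkg):
#         ...  # recompile the language pack, then record the new fingerprint
#         save_hash(pkg, create_hash(*po_files))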
def bumpversion(path: Path, new_version: Optional[str] = None) -> None:
"""Update the package version.
Args:
path: Package path
new_version: [optional] Explicit new version; when omitted, the build part is bumped.
"""
if new_version:
cmd_args = [
"bump2version",
"--allow-dirty",
"--new-version",
new_version,
"build",
]
else:
cmd_args = ["bump2version", "--allow-dirty", "build"]
subprocess.check_call(cmd_args, cwd=path)
def prepare_jupyterlab_lp_release(
crowdin_key: str, new_version: Optional[str] = None
) -> None:
"""Prepare the JupyterLab Language Packages release
Version are in format X.Y.postZ and by default Z will be bumped.
Args:
crowdin_key: Crowdin API key
new_version: [optional] New version of the package
"""
# TODO upgrade from cookiecutter template
# Minimal percentage needed to compile a PO file
COMPILATION_THRESHOLD = 0
# This assumes the JupyterLab folder is the source of truth for available locales
for locale in sorted(filter(lambda i: i.is_dir(), JLAB_LOCALE_PATH.iterdir())):
locale_name = locale.name.replace("_", "-")
package_dir = LANG_PACKS_PATH / f"jupyterlab-language-pack-{locale_name}"
# Bump the version
if package_dir.exists():
bumpversion(package_dir, new_version)
else:
api.create_new_language_pack(LANG_PACKS_PATH, locale.name)
if new_version:
bumpversion(package_dir, new_version)
try:
all_po_files = [
JLAB_LOCALE_PATH / locale / LC_MESSAGES_FOLDER / "jupyterlab.po"
] + [
(
extension
/ LOCALE_FOLDER
/ locale.name
/ LC_MESSAGES_FOLDER
/ f"{extension.name}.po"
)
for extension in JLAB_EXT_PATH.iterdir()
]
po_files = list(filter(lambda f: f.is_file(), all_po_files))
# Check if PO files have been changed
if is_updated_translation(po_files, package_dir):
any_compiled = False
# Compile the PO files above a given percentage
for file in po_files:
po = polib.pofile(str(file))
percent_translated = po.percent_translated()
if percent_translated >= COMPILATION_THRESHOLD:
print(
locale_name,
file.stem,
f"{percent_translated}%",
"compiling...",
)
api.compile_language_pack(REPO_ROOT, file.stem, [locale.name])
any_compiled = True
else:
print(
locale_name,
file.stem,
f"{percent_translated}% < {COMPILATION_THRESHOLD}%",
)
if any_compiled:
# Update the hash value
save_hash(package_dir, create_hash(*po_files))
# Update the contributors file
contributors = package_dir / CONTRIBUTORS
contributors.write_text(
get_contributors_report(
locale=locale_name, crowdin_key=crowdin_key
)
)
else:
print(f"No updates for the language package {locale_name}")
except BaseException:
print(
f"An error occurred when generating the language package {locale_name}"
)
traceback.print_exc()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Prepare JupyterLab language packages for release"
)
parser.add_argument("--new-version", help="New version of the language packages")
parser.add_argument(
"--crowdin-key", default=os.environ.get("CROWDIN_API"), help="Crowdin API key"
)
args = parser.parse_args()
if args.new_version:
if VERSION_REGEX.fullmatch(args.new_version) is None:
raise ValueError(
f"Version must be formatted as '<major>.<minor>.post<build>'; got {args.new_version}"
)
if args.crowdin_key is None:
raise ValueError(
"Crowdin API key needs to be set using either the "
"'--crowdin-key' option or the 'CROWDIN_API' environment variable"
)
prepare_jupyterlab_lp_release(args.crowdin_key, args.new_version)
|
the-stack_106_29354 | # -*- coding: utf-8 -*-
import csv
import logging
import os.path
from functools import wraps
from html import escape
from io import StringIO
from itertools import chain
from time import time
import gevent
from flask import Flask, make_response, jsonify, render_template, request
from flask_basicauth import BasicAuth
from gevent import pywsgi
from locust import __version__ as version
from .exception import AuthCredentialsError
from .log import greenlet_exception_logger
from .runners import MasterLocustRunner
from .stats import failures_csv, requests_csv, sort_stats
from .util.cache import memoize
from .util.rounding import proper_round
from .util.timespan import parse_timespan
logger = logging.getLogger(__name__)
greenlet_exception_handler = greenlet_exception_logger(logger)
DEFAULT_CACHE_TIME = 2.0
class WebUI:
"""
Sets up and runs a Flask web app that can start and stop load tests using the
:attr:`environment.runner <locust.env.Environment.runner>` as well as show the load test statistics
in :attr:`environment.stats <locust.env.Environment.stats>`
"""
app = None
"""
Reference to the :class:`flask.Flask` app. Can be used to add additional web routes and customize
the Flask app in other various ways. Example::
from flask import request
@web_ui.app.route("/my_custom_route")
def my_custom_route():
return "your IP is: %s" % request.remote_addr
"""
greenlet = None
"""
Greenlet of the running web server
"""
server = None
"""Reference to the :class:`pywsgi.WSGIServer` instance"""
def __init__(self, environment, host, port, auth_credentials=None):
"""
Create WebUI instance and start running the web server in a separate greenlet (self.greenlet)
Arguments:
environment: Reference to the current Locust Environment
host: Host/interface that the web server should accept connections to
port: Port that the web server should listen to
auth_credentials: If provided, it will enable basic auth with all the routes protected by default.
Should be supplied in the format: "user:pass".
"""
environment.web_ui = self
self.environment = environment
self.host = host
self.port = port
app = Flask(__name__)
self.app = app
app.debug = True
app.root_path = os.path.dirname(os.path.abspath(__file__))
self.app.config["BASIC_AUTH_ENABLED"] = False
self.auth = None
self.greenlet = None
if auth_credentials is not None:
credentials = auth_credentials.split(':')
if len(credentials) == 2:
self.app.config["BASIC_AUTH_USERNAME"] = credentials[0]
self.app.config["BASIC_AUTH_PASSWORD"] = credentials[1]
self.app.config["BASIC_AUTH_ENABLED"] = True
self.auth = BasicAuth()
self.auth.init_app(self.app)
else:
raise AuthCredentialsError("Invalid auth_credentials. It should be a string in the following format: 'user:pass'")
@app.route('/')
@self.auth_required_if_enabled
def index():
if not environment.runner:
return make_response("Error: Locust Environment does not have any runner", 500)
is_distributed = isinstance(environment.runner, MasterLocustRunner)
if is_distributed:
worker_count = environment.runner.worker_count
else:
worker_count = 0
override_host_warning = False
if environment.host:
host = environment.host
elif environment.runner.locust_classes:
all_hosts = set([l.host for l in environment.runner.locust_classes])
if len(all_hosts) == 1:
host = list(all_hosts)[0]
else:
# since we have multiple Locust classes with different host attributes, we'll
# inform that specifying host will override the host for all Locust classes
override_host_warning = True
host = None
else:
host = None
return render_template("index.html",
state=environment.runner.state,
is_distributed=is_distributed,
user_count=environment.runner.user_count,
version=version,
host=host,
override_host_warning=override_host_warning,
worker_count=worker_count,
is_step_load=environment.step_load,
)
@app.route('/swarm', methods=["POST"])
@self.auth_required_if_enabled
def swarm():
assert request.method == "POST"
locust_count = int(request.form["locust_count"])
hatch_rate = float(request.form["hatch_rate"])
if (request.form.get("host")):
environment.host = str(request.form["host"])
if environment.step_load:
step_locust_count = int(request.form["step_locust_count"])
step_duration = parse_timespan(str(request.form["step_duration"]))
environment.runner.start_stepload(locust_count, hatch_rate, step_locust_count, step_duration)
return jsonify({'success': True, 'message': 'Swarming started in Step Load Mode', 'host': environment.host})
environment.runner.start(locust_count, hatch_rate)
return jsonify({'success': True, 'message': 'Swarming started', 'host': environment.host})
@app.route('/stop')
@self.auth_required_if_enabled
def stop():
environment.runner.stop()
return jsonify({'success':True, 'message': 'Test stopped'})
@app.route("/stats/reset")
@self.auth_required_if_enabled
def reset_stats():
environment.runner.stats.reset_all()
environment.runner.exceptions = {}
return "ok"
@app.route("/stats/requests/csv")
@self.auth_required_if_enabled
def request_stats_csv():
response = make_response(requests_csv(self.environment.runner.stats))
file_name = "requests_{0}.csv".format(time())
disposition = "attachment;filename={0}".format(file_name)
response.headers["Content-type"] = "text/csv"
response.headers["Content-disposition"] = disposition
return response
@app.route("/stats/failures/csv")
@self.auth_required_if_enabled
def failures_stats_csv():
response = make_response(failures_csv(self.environment.runner.stats))
file_name = "failures_{0}.csv".format(time())
disposition = "attachment;filename={0}".format(file_name)
response.headers["Content-type"] = "text/csv"
response.headers["Content-disposition"] = disposition
return response
@app.route('/stats/requests')
@self.auth_required_if_enabled
@memoize(timeout=DEFAULT_CACHE_TIME, dynamic_timeout=True)
def request_stats():
stats = []
for s in chain(sort_stats(self.environment.runner.stats.entries), [environment.runner.stats.total]):
stats.append({
"method": s.method,
"name": s.name,
"safe_name": escape(s.name, quote=False),
"num_requests": s.num_requests,
"num_failures": s.num_failures,
"avg_response_time": s.avg_response_time,
"min_response_time": 0 if s.min_response_time is None else proper_round(s.min_response_time),
"max_response_time": proper_round(s.max_response_time),
"current_rps": s.current_rps,
"current_fail_per_sec": s.current_fail_per_sec,
"median_response_time": s.median_response_time,
"ninetieth_response_time": s.get_response_time_percentile(0.9),
"avg_content_length": s.avg_content_length,
})
errors = []
for e in environment.runner.errors.values():
err_dict = e.to_dict()
err_dict["name"] = escape(err_dict["name"])
err_dict["error"] = escape(err_dict["error"])
errors.append(err_dict)
# Truncate the total number of stats and errors displayed since a large number of rows will cause the app
# to render extremely slowly. Aggregate stats should be preserved.
report = {"stats": stats[:500], "errors": errors[:500]}
if len(stats) > 500:
report["stats"] += [stats[-1]]
if stats:
report["total_rps"] = stats[len(stats)-1]["current_rps"]
report["fail_ratio"] = environment.runner.stats.total.fail_ratio
report["current_response_time_percentile_95"] = environment.runner.stats.total.get_current_response_time_percentile(0.95)
report["current_response_time_percentile_50"] = environment.runner.stats.total.get_current_response_time_percentile(0.5)
is_distributed = isinstance(environment.runner, MasterLocustRunner)
if is_distributed:
workers = []
for worker in environment.runner.clients.values():
workers.append({"id":worker.id, "state":worker.state, "user_count": worker.user_count, "cpu_usage":worker.cpu_usage})
report["workers"] = workers
report["state"] = environment.runner.state
report["user_count"] = environment.runner.user_count
return jsonify(report)
@app.route("/exceptions")
@self.auth_required_if_enabled
def exceptions():
return jsonify({
'exceptions': [
{
"count": row["count"],
"msg": row["msg"],
"traceback": row["traceback"],
"nodes" : ", ".join(row["nodes"])
} for row in environment.runner.exceptions.values()
]
})
@app.route("/exceptions/csv")
@self.auth_required_if_enabled
def exceptions_csv():
data = StringIO()
writer = csv.writer(data)
writer.writerow(["Count", "Message", "Traceback", "Nodes"])
for exc in environment.runner.exceptions.values():
nodes = ", ".join(exc["nodes"])
writer.writerow([exc["count"], exc["msg"], exc["traceback"], nodes])
data.seek(0)
response = make_response(data.read())
file_name = "exceptions_{0}.csv".format(time())
disposition = "attachment;filename={0}".format(file_name)
response.headers["Content-type"] = "text/csv"
response.headers["Content-disposition"] = disposition
return response
# start the web server
self.greenlet = gevent.spawn(self.start)
self.greenlet.link_exception(greenlet_exception_handler)
def start(self):
self.server = pywsgi.WSGIServer((self.host, self.port), self.app, log=None)
self.server.serve_forever()
def stop(self):
"""
Stop the running web server
"""
self.server.stop()
def auth_required_if_enabled(self, view_func):
"""
Decorator that can be used on custom route methods that will turn on Basic Auth
authentication if the ``--web-auth`` flag is used. Example::
@web_ui.app.route("/my_custom_route")
@web_ui.auth_required_if_enabled
def my_custom_route():
return "custom response"
"""
@wraps(view_func)
def wrapper(*args, **kwargs):
if self.app.config["BASIC_AUTH_ENABLED"]:
if self.auth.authenticate():
return view_func(*args, **kwargs)
else:
return self.auth.challenge()
else:
return view_func(*args, **kwargs)
return wrapper
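# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). Assumes `env` is an
# already-configured locust Environment whose runner has been created; the
# exact construction API depends on the locust version and is omitted here.
#
# web_ui = WebUI(env, host="127.0.0.1", port=8089, auth_credentials="user:pass")
# web_ui.greenlet.join()   # block on the web server greenlet until it exits
# web_ui.stop()
# ---------------------------------------------------------------------------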
|
the-stack_106_29355 | import math
import types
from typing import Optional, Union
import numba
import numba.experimental
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
from pandas.core.strings import StringMethods
from fletcher._algorithms import _extract_isnull_bitmap
from fletcher.algorithms.bool import all_true_like
from fletcher.algorithms.string import (
_endswith,
_slice_handle_chunk,
_startswith,
_text_cat,
_text_cat_chunked,
_text_cat_chunked_mixed,
_text_contains_case_sensitive,
_text_count_case_sensitive,
_text_replace_case_sensitive,
_text_strip,
)
from fletcher.base import (
FletcherBaseArray,
FletcherChunkedArray,
FletcherContinuousArray,
)
def buffers_as_arrays(sa):
buffers = sa.buffers()
return (
_extract_isnull_bitmap(sa, 0, len(sa)),
np.asarray(buffers[1]).view(np.uint32),
np.asarray(buffers[2]).view(np.uint8),
)
@numba.experimental.jitclass(
[
("missing", numba.uint8[:]),
("offsets", numba.uint32[:]),
("data", numba.optional(numba.uint8[:])),
("offset", numba.int64),
]
)
class NumbaStringArray:
"""Wrapper around arrow's StringArray for use in numba functions.
Usage::
NumbaStringArray.make(array)
"""
def __init__(self, missing, offsets, data, offset):
self.missing = missing
self.offsets = offsets
self.data = data
self.offset = offset
@property
def byte_size(self):
# TODO: offset?
return self.data.shape[0]
@property
def size(self):
return len(self.offsets) - 1 - self.offset
def isnull(self, str_idx):
str_idx += self.offset
byte_idx = str_idx // 8
bit_mask = 1 << (str_idx % 8)
return (self.missing[byte_idx] & bit_mask) == 0
def byte_length(self, str_idx):
str_idx += self.offset
return self.offsets[str_idx + 1] - self.offsets[str_idx]
def get_byte(self, str_idx, byte_idx):
str_idx += self.offset
full_idx = self.offsets[str_idx] + byte_idx
return self.data[full_idx]
def length(self, str_idx):
result = 0
byte_length = self.byte_length(str_idx)
current = 0
while current < byte_length:
_, inc = self.get(str_idx, current)
current += inc
result += 1
return result
# TODO: implement this
def get(self, str_idx, byte_idx):
b = self.get_byte(str_idx, byte_idx)
if b > 127:
raise ValueError()
return b, 1
def decode(self, str_idx):
byte_length = self.byte_length(str_idx)
buffer = np.zeros(byte_length, np.int32)
i = 0
j = 0
while i < byte_length:
code, inc = self.get(str_idx, i)
buffer[j] = code
i += inc
j += 1
return buffer[:j]
def _make(cls, sa):
if not isinstance(sa, pa.StringArray):
sa = pa.array(sa, pa.string())
return cls(*buffers_as_arrays(sa), offset=sa.offset)
# @classmethod does not seem to be supported
NumbaStringArray.make = types.MethodType(_make, NumbaStringArray) # type: ignore
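# Editor's sketch (not part of the original module): NumbaStringArray.make wraps a
# pyarrow StringArray so that numba-jitted code can inspect it, e.g.
#
#     arr = pa.array(["foo", None, "bar"], pa.string())
#     nsa = NumbaStringArray.make(arr)
#     nsa.size        # 3
#     nsa.isnull(1)   # True -- the second slot is null
#     nsa.length(0)   # 3  -- number of code points in "foo"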
@numba.experimental.jitclass(
[("start", numba.uint32), ("end", numba.uint32), ("data", numba.uint8[:])]
)
class NumbaString:
def __init__(self, data, start=0, end=None):
if end is None:
end = data.shape[0]
self.data = data
self.start = start
self.end = end
@property
def length(self):
return self.end - self.start
def get_byte(self, i):
return self.data[self.start + i]
def _make_string(cls, obj):
if isinstance(obj, str):
data = obj.encode("utf8")
data = np.asarray(memoryview(data))
return cls(data, 0, len(data))
raise TypeError()
NumbaString.make = types.MethodType(_make_string, NumbaString) # type: ignore
@numba.experimental.jitclass(
[
("missing", numba.uint8[:]),
("offsets", numba.uint32[:]),
("data", numba.optional(numba.uint8[:])),
("string_position", numba.uint32),
("byte_position", numba.uint32),
("string_capacity", numba.uint32),
("byte_capacity", numba.uint32),
]
)
class NumbaStringArrayBuilder:
def __init__(self, string_capacity, byte_capacity):
self.missing = np.ones(_missing_capactiy(string_capacity), np.uint8)
self.offsets = np.zeros(string_capacity + 1, np.uint32)
self.data = np.zeros(byte_capacity, np.uint8)
self.string_position = 0
self.byte_position = 0
self.string_capacity = string_capacity
self.byte_capacity = byte_capacity
def increase_string_capacity(self, string_capacity):
assert string_capacity > self.string_capacity
missing = np.zeros(_missing_capactiy(string_capacity), np.uint8)
missing[: _missing_capactiy(self.string_capacity)] = self.missing
self.missing = missing
offsets = np.zeros(string_capacity + 1, np.uint32)
offsets[: self.string_capacity + 1] = self.offsets
self.offsets = offsets
self.string_capacity = string_capacity
def increase_byte_capacity(self, byte_capacity):
assert byte_capacity > self.byte_capacity
data = np.zeros(byte_capacity, np.uint8)
data[: self.byte_capacity] = self.data
self.data = data
self.byte_capacity = byte_capacity
def put_byte(self, b):
if self.byte_position >= self.byte_capacity:
self.increase_byte_capacity(int(math.ceil(1.2 * self.byte_capacity)))
self.data[self.byte_position] = b
self.byte_position += 1
def finish_string(self):
if self.string_position >= self.string_capacity:
self.increase_string_capacity(int(math.ceil(1.2 * self.string_capacity)))
self.offsets[self.string_position + 1] = self.byte_position
byte_idx = self.string_position // 8
self.missing[byte_idx] |= 1 << (self.string_position % 8)
self.string_position += 1
def finish_null(self):
if self.string_position >= self.string_capacity:
self.increase_string_capacity(int(math.ceil(1.2 * self.string_capacity)))
self.offsets[self.string_position + 1] = self.byte_position
byte_idx = self.string_position // 8
self.missing[byte_idx] &= ~(1 << (self.string_position % 8))
self.string_position += 1
def finish(self):
self.missing = self.missing[: _missing_capactiy(self.string_position)]
self.offsets = self.offsets[: self.string_position + 1]
self.data = self.data[: self.byte_position]
@numba.jit
def _missing_capactiy(capacity):
return int(math.ceil(capacity / 8))
class TextAccessorBase:
"""Base class for ``.fr_str`` and ``.fr_strx`` accessors."""
def __init__(self, obj):
self.obj = obj
self.data = self.obj.values.data
def _series_like(self, array: Union[pa.Array, pa.ChunkedArray]) -> pd.Series:
"""Return an Arrow result as a series with the same base classes as the input."""
return pd.Series(
type(self.obj.values)(array),
dtype=type(self.obj.dtype)(array.type),
index=self.obj.index,
)
def _call_str_accessor(self, func, *args, **kwargs) -> pd.Series:
"""Call the str accessor function, converting the Arrow series to a pandas series and back."""
pd_series = self.data.to_pandas()
array = pa.array(getattr(pd_series.str, func)(*args, **kwargs).values)
return self._series_like(array)
def _wrap_str_accessor(self, func):
"""Return a str accessor function that includes the transformation from Arrow series
to pandas series and back."""
def _wrapped_str_accessor(*args, **kwargs) -> pd.Series:
return self._call_str_accessor(func, *args, **kwargs)
return _wrapped_str_accessor
@staticmethod
def _validate_str_accessor(func):
"""Raise an exception if the given function name is not a valid function of StringMethods."""
if not (
hasattr(pd.core.strings.StringMethods, func)
and callable(getattr(pd.core.strings.StringMethods, func))
):
raise AttributeError(
f"{func} not available in pd.core.strings.StringMethods nor in fletcher.string_array.TextAccessor"
)
@pd.api.extensions.register_series_accessor("fr_str")
class TextAccessorExt(TextAccessorBase):
"""Accessor for pandas exposed as ``.fr_str``."""
def __init__(self, obj):
"""Accessor for pandas exposed as ``.fr_str``.
fletcher functionality is used when available; otherwise the pandas str functions are invoked."""
if not isinstance(obj.values, FletcherBaseArray):
# call StringMethods to validate the input obj
StringMethods(obj)
super().__init__(obj)
def __getattr__(self, name):
TextAccessorBase._validate_str_accessor(name)
if isinstance(self.obj.values, FletcherBaseArray):
if hasattr(TextAccessor, name) and callable(getattr(TextAccessor, name)):
return getattr(TextAccessor(self.obj), name)
return self._wrap_str_accessor(name)
return getattr(self.obj.str, name)
@pd.api.extensions.register_series_accessor("fr_strx")
class TextAccessor(TextAccessorBase):
"""Accessor for pandas exposed as ``.fr_strx``."""
def __init__(self, obj):
if not isinstance(obj.values, FletcherBaseArray):
raise AttributeError(
"only Fletcher{Continuous,Chunked}Array[string] has text accessor"
)
super().__init__(obj)
def cat(self, others: Optional[FletcherBaseArray]) -> pd.Series:
"""
Concatenate strings in the Series/Index with given separator.
If `others` is specified, this function concatenates the Series/Index
and elements of `others` element-wise.
If `others` is not passed, then all values in the Series/Index are
concatenated into a single string with a given `sep`.
"""
if not isinstance(others, pd.Series):
raise NotImplementedError(
"other needs to be Series of Fletcher{Chunked,Continuous}Array"
)
elif isinstance(others.values, FletcherChunkedArray):
return pd.Series(
FletcherChunkedArray(_text_cat_chunked(self.data, others.values.data))
)
elif not isinstance(others.values, FletcherContinuousArray):
raise NotImplementedError("other needs to be FletcherContinuousArray")
if isinstance(self.obj.values, FletcherChunkedArray):
return pd.Series(
FletcherChunkedArray(
_text_cat_chunked_mixed(self.data, others.values.data)
)
)
else: # FletcherContinuousArray
return pd.Series(
FletcherContinuousArray(_text_cat(self.data, others.values.data))
)
def contains(self, pat: str, case: bool = True, regex: bool = True) -> pd.Series:
"""
Test if pattern or regex is contained within a string of a Series or Index.
Return boolean Series or Index based on whether a given pattern or regex is
contained within a string of a Series or Index.
This implementation differs to the one in ``pandas``:
* We always return a missing for missing data.
* You cannot pass flags for the regular expression module.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
Series or Index of boolean values
A Series or Index of boolean values indicating whether the
given pattern is contained within the string of each element
of the Series or Index.
"""
if not regex:
if len(pat) == 0:
# For an empty pattern return all-True array
return self._series_like(all_true_like(self.data))
if case:
contains_exact = getattr(
pc, "binary_contains_exact", _text_contains_case_sensitive
)
# Can just check for a match on the byte-sequence
return self._series_like(contains_exact(self.data, pat))
else:
# Check if pat is all-ascii, then use lookup-table for lowercasing
# else: use libutf8proc
pass
return self._call_str_accessor("contains", pat=pat, case=case, regex=regex)
def count(self, pat: str, case: bool = True, regex: bool = True) -> pd.Series:
if not regex:
if case:
return self._series_like(_text_count_case_sensitive(self.data, pat))
return self._call_str_accessor("count", pat=pat)
def replace(
self, pat: str, repl: str, n: int = -1, case: bool = True, regex: bool = True
):
"""
Replace occurrences of pattern/regex in the Series/Index with some other string.
Equivalent to str.replace() or re.sub().
Return a string Series where in each row the occurrences of the given
pattern or regex ``pat`` are replaced by ``repl``.
This implementation differs to the one in ``pandas``:
* We always return a missing for missing data.
* You cannot pass flags for the regular expression module.
Parameters
----------
pat : str
Character sequence or regular expression.
repl : str
Replacement string.
n : int
Number of replacements to make from start.
case : bool, default True
If True, case sensitive.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
Series of string values.
"""
if n == 0:
return self._series_like(self.data)
if not regex:
if case:
return self._series_like(
_text_replace_case_sensitive(self.data, pat, repl, n)
)
return self._call_str_accessor(
"replace", pat=pat, repl=repl, n=n, case=case, regex=regex
)
def strip(self, to_strip=None):
"""Strip whitespaces from both ends of strings."""
# see for unicode spaces: https://en.wikibooks.org/wiki/Unicode/Character_reference/2000-2FFF
# for whatever reason 0x200B (zero width space) is not considered a space by pandas.split()
if to_strip is None:
to_strip = (
" \t\r\n\x85\x1f\x1e\x1d\x1c\x0c\x0b\xa0"
"\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2000\u2009\u200A\u2028\u2029\u202F\u205F"
)
return self._series_like(_text_strip(self.data, to_strip))
def zfill(self, width: int) -> pd.Series:
"""Pad strings in the Series/Index by prepending '0' characters."""
return self._call_str_accessor("zfill", width)
def startswith(self, pat):
"""Check whether a row starts with a certain pattern."""
return self._call_x_with(_startswith, pat)
def endswith(self, pat):
"""Check whether a row ends with a certain pattern."""
return self._call_x_with(_endswith, pat)
def _call_x_with(self, impl, needle, na=None):
needle = NumbaString.make(needle) # type: ignore
result = np.zeros(len(self.data), dtype=np.uint8)
if isinstance(self.data, pa.ChunkedArray):
offset = 0
for chunk in self.data.chunks:
str_arr = NumbaStringArray.make(chunk) # type: ignore
impl(str_arr, needle, 2, offset, result)
offset += len(chunk)
else:
str_arr = NumbaStringArray.make(self.data) # type: ignore
impl(str_arr, needle, 2, 0, result)
return pd.Series(
type(self.obj.values)(pa.array(result.astype(bool), mask=(result == 2)))
)
def slice(self, start=0, end=None, step=1):
"""Extract every `step` character from strings from `start` to `end`."""
return self._series_like(_slice_handle_chunk(self.data, start, end, step))
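# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): once the accessors
# above are registered, a fletcher-backed series exposes them directly. The
# coercion of a pyarrow array by the FletcherChunkedArray constructor is an
# assumption about the public fletcher API, not guaranteed by this file alone.
#
#     ser = pd.Series(FletcherChunkedArray(pa.array(["cat", "dog", None])))
#     ser.fr_strx.contains("ca", regex=False)   # fletcher fast path
#     ser.fr_str.zfill(5)                       # falls back to pandas' str machinery
# ---------------------------------------------------------------------------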
|
the-stack_106_29356 | #!/usr/bin/env python
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Setup linked packages required for GGRC
"""
import subprocess
def run_shell(script):
proc = subprocess.Popen(script, shell=True, stdout=subprocess.PIPE)
proc.wait()
return proc.returncode, proc.stdout.read()
def ln_package(source, target, force=True):
force = "-f" if force else ""
command = "ln %s -s %s %s" % (force, source, target)
if source.startswith(target):
print("Skipping possible self-link of %s" % (source,))
else:
print(command)
return subprocess.call(command, shell=True)
def _run_system_python_script_output(script, unindent=None):
# Unindent and escape script
script_lines = script.lstrip('\n').rstrip().splitlines()
script_lines = map(lambda line: line[unindent:], script_lines)
script = "\\n".join(script_lines)
script = """%(python_path)s -c 'exec("%(script)s")'""" % {
'python_path': "/usr/bin/python",
'script': script,
}
return_code, result = run_shell(script)
if return_code == 0:
if len(result.strip()) > 0:
return result.strip()
else:
return None
else:
return None
def _get_system_python_import_path(module, path_getter):
script = """
try:
import %(module)s
print(%(path_getter)s)
except ImportError, e:
print ""
""" % {
'module': module,
'path_getter': path_getter
}
return _run_system_python_script_output(script, unindent=4)
def setup_mysql_packages(packages_dir):
"""
This links MySQLdb module to opt/packages for use inside
the otherwise-isolated virtual environment
"""
module_path = _get_system_python_import_path(
'MySQLdb', 'MySQLdb.__path__[0]')
if module_path:
ln_package(module_path, packages_dir)
else:
print("Failed to import MySQLdb -- ensure it is available")
module_path = _get_system_python_import_path('_mysql', '_mysql.__file__')
if module_path:
ln_package(module_path, packages_dir)
else:
print("Failed to import _mysql -- ensure it is available")
module_path = _get_system_python_import_path(
'_mysql_exceptions', '_mysql_exceptions.__file__')
if module_path:
ln_package(module_path, packages_dir)
else:
print("Failed to import _mysql_exceptions -- ensure it is available")
def setup_imaging_packages(packages_dir):
module_path = _get_system_python_import_path('PIL', 'PIL.__path__[0]')
if module_path:
ln_package(module_path, packages_dir)
else:
print("Failed to import PIL -- ensure it is available")
def setup_google_packages(opt_dir, packages_dir):
"""
This links the `google` package from google_appengine
to opt/packages for use inside the virtual environment
"""
try:
import google
google_path = google.__path__[0]
except ImportError:
google_path = "%s/google_appengine/google" % (opt_dir,)
ln_package(google_path, packages_dir)
def main(packages_dir):
command = "mkdir -p {packages_dir}".format(packages_dir=packages_dir)
print(command)
subprocess.call(command, shell=True)
setup_mysql_packages(packages_dir)
setup_imaging_packages(packages_dir)
if __name__ == '__main__':
import sys
main(sys.argv[1].strip())
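# Editor's note (not part of the original script): invoked with the target
# packages directory as its single argument, e.g.
#
#     python bin/setup_linked_packages.py /vagrant-dev/opt/packages
#
# (the script path and packages directory shown are illustrative assumptions)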
|
the-stack_106_29358 | import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from ..registry import NECKS
from ..utils import ConvModule
@NECKS.register_module
class FPN(nn.Module):
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=True,
relu_before_extra_convs=False,
conv_cfg=None,
norm_cfg=None,
activation=None):
super(FPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.activation = activation
self.relu_before_extra_convs = relu_before_extra_convs
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
self.extra_convs_on_inputs = extra_convs_on_inputs
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.extra_convs_on_inputs:
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self, inputs):
# inputs: C1, C2, C3, C4
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
laterals[i - 1] += F.interpolate(
laterals[i], scale_factor=2, mode='nearest')
# build outputs
# part 1: from original levels
outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
] # P2, P3, P4, P5
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.extra_convs_on_inputs:
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[used_backbone_levels](orig)) # P6
else:
outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))  # P7: the ReLU introduces a non-linearity; otherwise P6->P7 would just be two stacked conv layers
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
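# Usage sketch (added for illustration; this module uses relative imports, so the
# example is left as a comment). Channel sizes assume ResNet-50-style C2-C5 maps:
#
#   import torch
#   fpn = FPN(in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5)
#   fpn.init_weights()
#   feats = [torch.rand(1, c, 64 // 2 ** i, 64 // 2 ** i)
#            for i, c in enumerate([256, 512, 1024, 2048])]
#   outs = fpn(feats)   # five levels: P2-P5 plus a max-pooled P6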
|
the-stack_106_29359 | from inputs import Input
from urllib.parse import urljoin
from util.constants import PMS_WATCH_HISTORY, EB_NEW_SEEN_EP, bus
from util.video import Video, VideoType, VideoSchema
from joblib import Memory
import requests
import xml.etree.ElementTree as ET
import time
import re
import logging
import os
import discord
import json
class Discord(Input):
class VideoDescriptor(object):
def __init__(self, j):
self.__dict__ = json.loads(j)
def __init__(self, name, config, cache_folder):
self.name = name
self.config = config
self.cache_folder = cache_folder
self.prefix = 'see-them-all[{0}]: '.format(self.config.get("uid"))
def recently_watched(self):
client = discord.Client()
@client.event
async def on_ready():
channel = client.get_channel(id=int(self.config.get("channel")))
            messages = await channel.history().flatten()
            videos = []
            for message in messages:
if len(message.content) > 6:
msg = message.content[3:len(message.content)-3]
if msg.startswith(self.prefix):
d = VideoSchema().load(json.loads(msg[len(self.prefix):])).data
if d.type_ == VideoType.EPISODE:
videos.append(Video(
d.title, VideoType.EPISODE,
Video.Id(tvdb_id=d.ids.tvdb_id, imdb_id=d.ids.imdb_id, tmdb_id=d.ids.tmdb_id),
d.season, d.episode
))
elif d.type_ == VideoType.MOVIE:
videos.append(Video(
d.title, VideoType.MOVIE,
Video.Id(tvdb_id=d.ids.tvdb_id, imdb_id=d.ids.imdb_id, tmdb_id=d.ids.tmdb_id)
))
bus.emit('{0}:{1}'.format(EB_NEW_SEEN_EP, self.name), videos)
await client.close()
client.run(self.config.get("token"))
|
the-stack_106_29360 | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import csv
from hashlib import md5
from mock import patch
from StringIO import StringIO
from nailgun.db.sqlalchemy.models import Task
from nailgun.openstack.common import jsonutils
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
from nailgun.test.base import reverse
class TestHandlers(BaseIntegrationTest):
def _create_capacity_log(self):
resp = self.app.put(
reverse('CapacityLogHandler'),
headers=self.default_headers)
self.assertEqual(resp.status_code, 202)
capacity_task = self.db.query(Task).filter_by(
name="capacity_log"
).first()
self.env.wait_ready(capacity_task)
def _get_capacity_log_json(self):
resp = self.app.get(
reverse('CapacityLogHandler'),
headers=self.default_headers
)
return jsonutils.loads(resp.body)
@fake_tasks()
def test_capacity_log_handler(self):
self.env.create_node(api=False)
self._create_capacity_log()
capacity_log = self._get_capacity_log_json()
for field in ['id', 'report']:
self.assertTrue(field in capacity_log)
report = capacity_log['report']
report_fields = ['fuel_data', 'environment_stats', 'allocation_stats']
for field in report_fields:
self.assertTrue(field in report)
self.assertEqual(report['allocation_stats']['allocated'], 0)
self.assertEqual(report['allocation_stats']['unallocated'], 1)
@patch('nailgun.api.v1.handlers.version.settings.VERSION', {
'release': '0.1b'})
def test_capacity_csv_checksum(self):
self._create_capacity_log()
resp = self.app.get(reverse('CapacityLogCsvHandler'))
self.assertEqual(200, resp.status_code)
response_stream = StringIO(resp.body)
checksum = md5(''.join(response_stream.readlines()[:-2])).hexdigest()
response_stream.seek(0)
csvreader = csv.reader(response_stream, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
rows = [
['Fuel version', '0.1b'],
['Fuel UUID', 'Unknown'],
['Checksum', checksum],
['Environment Name', 'Node Count'],
['Total number allocated of nodes', '0'],
['Total number of unallocated nodes', '0'],
['Node role(s)', 'Number of nodes with this configuration'],
[],
]
for row in csvreader:
self.assertTrue(row in rows)
@fake_tasks()
def test_capacity_nodes_allocation(self):
self.env.create(
cluster_kwargs={
'name': 'test_name'
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller', 'cinder'], 'pending_addition': True},
{'roles': ['compute', 'cinder'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True},
{'roles': ['cinder'], 'pending_addition': True}
]
)
deployment_task = self.env.launch_deployment()
self.env.wait_ready(deployment_task)
self._create_capacity_log()
capacity_log = self._get_capacity_log_json()
report = capacity_log['report']
self.assertEqual(report['allocation_stats']['allocated'], 6)
self.assertEqual(report['allocation_stats']['unallocated'], 0)
self.assertEqual(report['roles_stat']['controller'], 2)
self.assertEqual(report['roles_stat']['cinder+controller'], 1)
self.assertEqual(report['roles_stat']['cinder+compute'], 1)
self.assertEqual(report['roles_stat']['compute'], 1)
self.assertEqual(report['roles_stat']['cinder'], 1)
self.assertEqual(len(report['environment_stats']), 1)
test_env = report['environment_stats'][0]
self.assertEqual(test_env['cluster'], 'test_name')
self.assertEqual(test_env['nodes'], 6)
@fake_tasks(godmode=True)
def test_capacity_csv_log_with_unicode(self):
self.env.create(
cluster_kwargs={
'name': u'тест'
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True}
]
)
deployment_task = self.env.launch_deployment()
self.env.wait_ready(deployment_task)
self._create_capacity_log()
resp = self.app.get(reverse('CapacityLogCsvHandler'))
self.assertEqual(200, resp.status_code)
|
the-stack_106_29361 | class ShortMapExit():
DATA_SIZE = 0x06
def __init__(self):
self.x = 0
self.y = 0
def from_data(self, data):
assert(len(data) == self.DATA_SIZE)
self.x = data[0]
self.y = data[1]
self.dest_map = data[2] | (data[3] & 0x01) << 8
self.unknown = data[3] & 0xfe
self.dest_x = data[4]
self.dest_y = data[5]
def to_data(self):
data = [0x00] * self.DATA_SIZE
data[0] = self.x
data[1] = self.y
data[2] = self.dest_map & 0xff
data[3] = ((self.dest_map & 0x100) >> 8) | self.unknown
data[4] = self.dest_x
data[5] = self.dest_y
return data
def print(self):
print("{}, {} -> {}: {}, {} ({})".format(self.x, self.y, hex(self.dest_map), self.dest_x, self.dest_y, hex(self.unknown)))
class LongMapExit():
DATA_SIZE = 0x07
def __init__(self):
self.x = 0
self.y = 0
def from_data(self, data):
assert(len(data) == self.DATA_SIZE)
self.x = data[0]
self.y = data[1]
self.size = data[2] & 0x7f
self.direction = data[2] & 0x80 # horizontal/vertical
self.dest_map = data[3] | (data[4] & 0x01) << 8
self.unknown = data[4] & 0xfe
self.dest_x = data[5]
self.dest_y = data[6]
def to_data(self):
data = [0x00] * self.DATA_SIZE
data[0] = self.x
data[1] = self.y
data[2] = self.size | self.direction
data[3] = self.dest_map & 0xff
data[4] = ((self.dest_map & 0x100) >> 8) | self.unknown
data[5] = self.dest_x
data[6] = self.dest_y
return data
def print(self):
print("{}, {} {}, {} -> {}: {}, {}".format(self.x, self.y, self.size, self.direction, hex(self.dest_map), self.dest_x, self.dest_y))
|
the-stack_106_29364 | import codecs
class Sia30Flagger():
def __init__(self):
self.sensitiveWords = ["جمهوری", "ج.ا", "اسلامی", "ایران", "مرگ", "جاعش", "خامنه", "احمدی نژاد",
"روحانی", "@azarijahromi", "اینا", "مجلس", "قوه قضاییه", "رئیس", "آخوند", "شاه ", "حجاب", "آمریکا"]
def filterSia30(self, tweets_list):
sensitiveTweets = []
for i in range(len(tweets_list)):
foundWords = []
for j in range(len(self.sensitiveWords)):
if(len(self.sensitiveWords[j]) == 0):
continue
if tweets_list[i]["text"].find(self.sensitiveWords[j]) > -1:
foundWords.append(self.sensitiveWords[j])
if len(foundWords) > 0:
sensitiveTweets.append(
{"tweet": tweets_list[i], "foundWords": foundWords})
return sensitiveTweets
def groupByFilteredSia30(self, sensitiveTweets):
uniqueId = {}
for i in range(len(sensitiveTweets)):
id_str = sensitiveTweets[i]["tweet"]["id_str"]
if id_str in uniqueId.keys():
                uniqueId[id_str][0]["foundWords"] += ' ' + ' '.join(
                    str(elem) for elem in sensitiveTweets[i]["foundWords"])
else:
uniqueId[id_str] = []
uniqueId[id_str].append({"foundWords": ' '.join(
[str(elem) for elem in sensitiveTweets[i]["foundWords"]])})
uniqueId[id_str].append(
{"tweet": sensitiveTweets[i]["tweet"]})
return uniqueId
def writeUniqueFilteredTweetsReport(self, sensitiveTweets_unique):
file = codecs.open("output.html", "w", "utf-8")
file.write("<div style='direction: rtl;'>")
for tweetKey in sensitiveTweets_unique.keys():
file.write("<p><a target='_blank' href='https://twitter.com/i/web/status/" +
                       tweetKey + "'>" + sensitiveTweets_unique[tweetKey][1]["tweet"]["text"] + "</a>")
file.write(
" " + sensitiveTweets_unique[tweetKey][0]["foundWords"]+"</p>")
file.write("<br>")
file.write("</div>")
file.close()
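# Usage sketch (added for illustration; tweet dicts are assumed to carry the
# 'text' and 'id_str' keys that the methods above read):
if __name__ == '__main__':
    flagger = Sia30Flagger()
    tweets = [{"id_str": "1", "text": "something about ایران"}]
    flagged = flagger.filterSia30(tweets)             # finds the sensitive word
    grouped = flagger.groupByFilteredSia30(flagged)
    flagger.writeUniqueFilteredTweetsReport(grouped)  # writes output.html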
|
the-stack_106_29368 | # BSD 2-Clause License
#
# Copyright (c) 2021-2022, Hewlett Packard Enterprise
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
from .step import Step
class LocalStep(Step):
def __init__(self, name, cwd, run_settings):
super().__init__(name, cwd)
self.run_settings = run_settings
self.env = self._set_env()
def get_launch_cmd(self):
cmd = []
# Add run command and args if user specified
# default is no run command for local job steps
if self.run_settings.run_command:
cmd.append(self.run_settings.run_command)
run_args = self.run_settings.format_run_args()
cmd.extend(run_args)
if self.run_settings.colocated_db_settings:
# Replace the command with the entrypoint wrapper script
bash = shutil.which("bash")
launch_script_path = self.get_colocated_launch_script()
cmd.extend([bash, launch_script_path])
# build executable
cmd.extend(self.run_settings.exe)
if self.run_settings.exe_args:
cmd.extend(self.run_settings.exe_args)
return cmd
def _set_env(self):
env = os.environ.copy()
if self.run_settings.env_vars:
for k, v in self.run_settings.env_vars.items():
env[k] = v
return env
|
the-stack_106_29370 | #
# Single Particle Model with Electrolyte (SPMe)
#
import pybamm
from .base_lithium_ion_model import BaseModel
class SPMe(BaseModel):
"""Single Particle Model with Electrolyte (SPMe) of a lithium-ion battery, from
[1]_.
Parameters
----------
options : dict, optional
A dictionary of options to be passed to the model.
name : str, optional
The name of the model.
build : bool, optional
Whether to build the model on instantiation. Default is True. Setting this
option to False allows users to change any number of the submodels before
building the complete model (submodels cannot be changed after the model is
built).
References
----------
.. [1] SG Marquis, V Sulzer, R Timms, CP Please and SJ Chapman. “An asymptotic
derivation of a single particle model with electrolyte”. Journal of The
Electrochemical Society, 166(15):A3693–A3706, 2019
**Extends:** :class:`pybamm.lithium_ion.BaseModel`
"""
def __init__(
self, options=None, name="Single Particle Model with electrolyte", build=True
):
super().__init__(options, name)
self.set_external_circuit_submodel()
self.set_porosity_submodel()
self.set_tortuosity_submodels()
self.set_convection_submodel()
self.set_interfacial_submodel()
self.set_other_reaction_submodels_to_zero()
self.set_particle_submodel()
self.set_negative_electrode_submodel()
self.set_electrolyte_submodel()
self.set_positive_electrode_submodel()
self.set_thermal_submodel()
self.set_current_collector_submodel()
self.set_sei_submodel()
if build:
self.build_model()
pybamm.citations.register("marquis2019asymptotic")
def set_porosity_submodel(self):
self.submodels["porosity"] = pybamm.porosity.Constant(self.param)
def set_convection_submodel(self):
self.submodels[
"through-cell convection"
] = pybamm.convection.through_cell.NoConvection(self.param)
self.submodels[
"transverse convection"
] = pybamm.convection.transverse.NoConvection(self.param)
def set_tortuosity_submodels(self):
self.submodels["electrolyte tortuosity"] = pybamm.tortuosity.Bruggeman(
self.param, "Electrolyte", True
)
self.submodels["electrode tortuosity"] = pybamm.tortuosity.Bruggeman(
self.param, "Electrode", True
)
def set_interfacial_submodel(self):
self.submodels["negative interface"] = pybamm.interface.InverseButlerVolmer(
self.param, "Negative", "lithium-ion main"
)
self.submodels["positive interface"] = pybamm.interface.InverseButlerVolmer(
self.param, "Positive", "lithium-ion main"
)
def set_particle_submodel(self):
if self.options["particle"] == "Fickian diffusion":
self.submodels["negative particle"] = pybamm.particle.FickianSingleParticle(
self.param, "Negative"
)
self.submodels["positive particle"] = pybamm.particle.FickianSingleParticle(
self.param, "Positive"
)
elif self.options["particle"] == "fast diffusion":
self.submodels["negative particle"] = pybamm.particle.FastSingleParticle(
self.param, "Negative"
)
self.submodels["positive particle"] = pybamm.particle.FastSingleParticle(
self.param, "Positive"
)
def set_negative_electrode_submodel(self):
self.submodels["negative electrode"] = pybamm.electrode.ohm.Composite(
self.param, "Negative"
)
def set_positive_electrode_submodel(self):
self.submodels["positive electrode"] = pybamm.electrode.ohm.Composite(
self.param, "Positive"
)
def set_electrolyte_submodel(self):
self.submodels[
"electrolyte conductivity"
] = pybamm.electrolyte_conductivity.Composite(self.param)
self.submodels["electrolyte diffusion"] = pybamm.electrolyte_diffusion.Full(
self.param
)
@property
def default_geometry(self):
dimensionality = self.options["dimensionality"]
if dimensionality == 0:
return pybamm.Geometry("1D macro", "1D micro")
elif dimensionality == 1:
return pybamm.Geometry("1+1D macro", "(1+0)+1D micro")
elif dimensionality == 2:
return pybamm.Geometry("2+1D macro", "(2+0)+1D micro")
|
the-stack_106_29371 | import os
import numpy as np
import pandas as pd
from larval_gonad.io import pickle_load
def main():
fbgn2chrom = get_fbgn2chrom()
yogn2fbgn = pickle_load(snakemake.input.orthologs)
adult_counts = get_counts(yogn2fbgn)
df = (
adult_counts.reset_index()
.melt(id_vars="FBgn", var_name="sample_ID", value_name="Count")
.assign(strain="w1118")
.assign(stage="adult")
.assign(
tissue=lambda x: x.sample_ID.str.extract(
r"w1118_(\w+)_\w+_r\d", expand=False
)
)
.assign(
sex=lambda x: x.sample_ID.str.extract(
r"w1118_\w+_(\w+)_r\d", expand=False
)
)
.assign(
rep=lambda x: x.sample_ID.str.extract(r"w1118_\w+_\w+_r(\d)", expand=False)
)
.assign(data_source="RNA-Seq")
.set_index("FBgn")
.join(fbgn2chrom, how="inner")
.reset_index()
)
df.to_feather(snakemake.output[0])
def get_fbgn2chrom():
return (
pd.read_feather(snakemake.input.gene_annot, columns=["FBgn", "FB_chrom"])
.set_index("FBgn")
.query('FB_chrom in ["X", "2L", "2R", "3L", "3R", "4", "Y"]')
.squeeze()
.rename("chrom")
)
def get_counts(yogn2fbgn):
gene_counts = (
pd.read_feather(snakemake.input.counts)
.assign(FBgn=lambda x: x.YOgn.map(lambda y: yogn2fbgn.get(y, np.nan)))
.dropna()
.set_index("FBgn", drop=True)
.drop("YOgn", axis=1)
)
return gene_counts
if __name__ == "__main__":
main()
|
the-stack_106_29373 | import os
from os.path import join
from .. import run_nbgrader
from .base import BaseTestApp
from .conftest import notwindows
@notwindows
class TestNbGraderFetch(BaseTestApp):
def _release(self, assignment, exchange, course_dir, course="abc101"):
self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
run_nbgrader([
"release", assignment,
"--course", course,
"--TransferApp.exchange_directory={}".format(exchange)
])
def _fetch(self, assignment, exchange, flags=None, retcode=0, course="abc101"):
cmd = [
"fetch", assignment,
"--course", course,
"--TransferApp.exchange_directory={}".format(exchange)
]
if flags is not None:
cmd.extend(flags)
run_nbgrader(cmd, retcode=retcode)
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["fetch", "--help-all"])
def test_no_course_id(self, exchange, course_dir):
"""Does releasing without a course id thrown an error?"""
self._release("ps1", exchange, course_dir)
cmd = [
"fetch", "ps1",
"--TransferApp.exchange_directory={}".format(exchange)
]
run_nbgrader(cmd, retcode=1)
def test_fetch(self, exchange, course_dir):
self._release("ps1", exchange, course_dir)
self._fetch("ps1", exchange)
assert os.path.isfile(join("ps1", "p1.ipynb"))
# make sure it fails if the assignment already exists
self._fetch("ps1", exchange, retcode=1)
# make sure it fails even if the assignment is incomplete
os.remove(join("ps1", "p1.ipynb"))
self._fetch("ps1", exchange, retcode=1)
# make sure it passes if the --replace flag is given
self._fetch("ps1", exchange, flags=["--replace"])
assert os.path.isfile(join("ps1", "p1.ipynb"))
# make sure the --replace flag doesn't overwrite files, though
self._copy_file(join("files", "submitted-changed.ipynb"), join("ps1", "p1.ipynb"))
with open(join("ps1", "p1.ipynb"), "r") as fh:
contents1 = fh.read()
self._fetch("ps1", exchange, flags=["--replace"])
with open(join("ps1", "p1.ipynb"), "r") as fh:
contents2 = fh.read()
assert contents1 == contents2
def test_fetch_with_assignment_flag(self, exchange, course_dir):
self._release("ps1", exchange, course_dir)
self._fetch("--assignment=ps1", exchange)
assert os.path.isfile(join("ps1", "p1.ipynb"))
def test_fetch_multiple_courses(self, exchange, course_dir):
self._release("ps1", exchange, course_dir, course="abc101")
self._fetch("ps1", exchange, course="abc101", flags=["--TransferApp.path_includes_course=True"])
assert os.path.isfile(join("abc101", "ps1", "p1.ipynb"))
self._release("ps1", exchange, course_dir, course="abc102")
self._fetch("ps1", exchange, course="abc102", flags=["--TransferApp.path_includes_course=True"])
assert os.path.isfile(join("abc102", "ps1", "p1.ipynb"))
|
the-stack_106_29374 | """Stuff that differs in different Python versions and platform
distributions."""
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
import codecs
import locale
import logging
import os
import sys
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, Union
__all__ = ["console_to_str", "get_path_uid", "stdlib_pkgs", "WINDOWS"]
logger = logging.getLogger(__name__)
def has_tls():
# type: () -> bool
try:
import _ssl # noqa: F401 # ignore unused
return True
except ImportError:
pass
from pip._vendor.urllib3.util import IS_PYOPENSSL
return IS_PYOPENSSL
def str_to_display(data, desc=None):
# type: (Union[bytes, str], Optional[str]) -> str
"""
For display or logging purposes, convert a bytes object (or text) to
text (e.g. unicode in Python 2) safe for output.
:param desc: An optional phrase describing the input data, for use in
the log message if a warning is logged. Defaults to "Bytes object".
This function should never error out and so can take a best effort
approach. It is okay to be lossy if needed since the return value is
just for display.
We assume the data is in the locale preferred encoding. If it won't
decode properly, we warn the user but decode as best we can.
We also ensure that the output can be safely written to standard output
without encoding errors.
"""
if isinstance(data, str):
return data
# Otherwise, data is a bytes object (str in Python 2).
# First, get the encoding we assume. This is the preferred
# encoding for the locale, unless that is not found, or
# it is ASCII, in which case assume UTF-8
encoding = locale.getpreferredencoding()
if (not encoding) or codecs.lookup(encoding).name == "ascii":
encoding = "utf-8"
# Now try to decode the data - if we fail, warn the user and
# decode with replacement.
try:
decoded_data = data.decode(encoding)
except UnicodeDecodeError:
logger.warning(
'%s does not appear to be encoded as %s',
desc or 'Bytes object',
encoding,
)
decoded_data = data.decode(encoding, errors="backslashreplace")
# Make sure we can print the output, by encoding it to the output
# encoding with replacement of unencodable characters, and then
# decoding again.
# We use stderr's encoding because it's less likely to be
# redirected and if we don't find an encoding we skip this
# step (on the assumption that output is wrapped by something
# that won't fail).
# The double getattr is to deal with the possibility that we're
# being called in a situation where sys.__stderr__ doesn't exist,
# or doesn't have an encoding attribute. Neither of these cases
# should occur in normal pip use, but there's no harm in checking
# in case people use pip in (unsupported) unusual situations.
output_encoding = getattr(getattr(sys, "__stderr__", None),
"encoding", None)
if output_encoding:
output_encoded = decoded_data.encode(
output_encoding,
errors="backslashreplace"
)
decoded_data = output_encoded.decode(output_encoding)
return decoded_data
def console_to_str(data):
# type: (bytes) -> str
"""Return a string, safe for output, of subprocess output.
"""
return str_to_display(data, desc='Subprocess output')
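# Illustration (added): under a UTF-8 locale, str_to_display(b'caf\xc3\xa9') returns
# 'café', while bytes that do not decode in the assumed encoding (e.g. b'\xff') log a
# warning and come back backslash-escaped instead of raising.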
def get_path_uid(path):
# type: (str) -> int
"""
Return path's uid.
Does not follow symlinks:
https://github.com/pypa/pip/pull/935#discussion_r5307003
Placed this function in compat due to differences on AIX and
Jython, that should eventually go away.
:raises OSError: When path is a symlink or can't be read.
"""
if hasattr(os, 'O_NOFOLLOW'):
fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
file_uid = os.fstat(fd).st_uid
os.close(fd)
else: # AIX and Jython
# WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW
if not os.path.islink(path):
# older versions of Jython don't have `os.fstat`
file_uid = os.stat(path).st_uid
else:
# raise OSError for parity with os.O_NOFOLLOW above
raise OSError(
"{} is a symlink; Will not return uid for symlinks".format(
path)
)
return file_uid
# packages in the stdlib that may have installation metadata, but should not be
# considered 'installed'. this theoretically could be determined based on
# dist.location (py27:`sysconfig.get_paths()['stdlib']`,
# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may
# make this ineffective, so hard-coding
stdlib_pkgs = {"python", "wsgiref", "argparse"}
# windows detection, covers cpython and ironpython
WINDOWS = (sys.platform.startswith("win") or
(sys.platform == 'cli' and os.name == 'nt'))
|
the-stack_106_29375 | #!/usr/bin/env python
from __future__ import division
import random
import math
import numpy as np
def process_statistics(F):
Flux1 = np.zeros(128)
Flux2 = np.zeros(128)
Flux1_squared = np.zeros(128)
Flux2_squared = np.zeros(128)
Current1 = np.zeros(129)
Current2 = np.zeros(129)
Current1_squared = np.zeros(129)
Current2_squared = np.zeros(129)
for data in F:
flux_1 = data[0]
flux_2 = data[1]
flux1_squared = data[2]
flux2_squared = data[3]
current1 = data[4]
current2 = data[5]
Flux1 += flux_1
Flux2 += flux_2
Flux1_squared += flux1_squared
Flux2_squared += flux2_squared
Current1 += current1
Current2 += current2
Current1_squared += current1**2
Current2_squared += current2**2
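    # NOTE (added): `neutrons` is not defined in this file; it appears to be a
    # module-level global (histories per batch) supplied by the surrounding script.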
total_neutrons = (len(F) * neutrons)
Flux1_average = Flux1 / total_neutrons
Flux1_var = np.sqrt((1 / (total_neutrons - 1) * (Flux1_squared - Flux1_average**2))/total_neutrons)
Current1_average = Current1 / total_neutrons
Current1_var = np.sqrt((1 / (total_neutrons - 1) * (Current1_squared - Current1_average**2))/total_neutrons)
Flux2_average = Flux2 / total_neutrons
Flux2_var = np.sqrt((1 / (total_neutrons - 1) * (Flux2_squared - Flux2_average**2))/total_neutrons)
Current2_average = Current2 / total_neutrons
Current2_var = np.sqrt((1 / (total_neutrons - 1) * (Current2_squared - Current2_average**2))/total_neutrons)
return Flux1_average, Flux1_var, Flux2_average, Flux2_var, Current1_average, Current1_var, Current2_average, Current2_var
|
the-stack_106_29376 | # Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from concurrent import futures
from datetime import datetime
import grpc
import pandas as pd
import pytest
import pytz
from google.protobuf import json_format
import dataframes
import feast.core.CoreService_pb2_grpc as Core
from feast.client import Client
from feast.entity import Entity
from feast.feature_set import (
Feature,
FeatureSet,
FeatureSetRef,
_make_tfx_schema_domain_info_inline,
)
from feast.value_type import ValueType
from feast_core_server import CoreServicer
from tensorflow_metadata.proto.v0 import schema_pb2
CORE_URL = "core.feast.local"
SERVING_URL = "serving.feast.local"
class TestFeatureSet:
@pytest.fixture(scope="function")
def server(self):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Core.add_CoreServiceServicer_to_server(CoreServicer(), server)
server.add_insecure_port("[::]:50051")
server.start()
yield server
server.stop(0)
@pytest.fixture
def client(self, server):
return Client(core_url="localhost:50051")
def test_add_remove_features_success(self):
fs = FeatureSet("my-feature-set")
fs.add(Feature(name="my-feature-1", dtype=ValueType.INT64))
fs.add(Feature(name="my-feature-2", dtype=ValueType.INT64))
fs.drop(name="my-feature-1")
assert len(fs.features) == 1 and fs.features[0].name == "my-feature-2"
def test_remove_feature_failure(self):
with pytest.raises(ValueError):
fs = FeatureSet("my-feature-set")
fs.drop(name="my-feature-1")
def test_update_from_source_failure(self):
with pytest.raises(Exception):
df = pd.DataFrame()
fs = FeatureSet("driver-feature-set")
fs.infer_fields_from_df(df)
@pytest.mark.parametrize(
"dataframe,feature_count,entity_count,discard_unused_fields,features,entities",
[
(
dataframes.GOOD,
3,
1,
True,
[],
[Entity(name="entity_id", dtype=ValueType.INT64)],
),
(
dataframes.GOOD_FIVE_FEATURES,
5,
1,
True,
[],
[Entity(name="entity_id", dtype=ValueType.INT64)],
),
(
dataframes.GOOD_FIVE_FEATURES,
6,
1,
True,
[Feature(name="feature_6", dtype=ValueType.INT64)],
[Entity(name="entity_id", dtype=ValueType.INT64)],
),
(
dataframes.GOOD_FIVE_FEATURES_TWO_ENTITIES,
5,
2,
True,
[],
[
Entity(name="entity_1_id", dtype=ValueType.INT64),
Entity(name="entity_2_id", dtype=ValueType.INT64),
],
),
(
dataframes.GOOD_FIVE_FEATURES_TWO_ENTITIES,
6,
3,
False,
[],
[
Entity(name="entity_1_id", dtype=ValueType.INT64),
Entity(name="entity_2_id", dtype=ValueType.INT64),
],
),
(
dataframes.NO_FEATURES,
0,
1,
True,
[],
[Entity(name="entity_id", dtype=ValueType.INT64)],
),
(
pd.DataFrame(
{
"datetime": [
datetime.utcnow().replace(tzinfo=pytz.utc) for _ in range(3)
]
}
),
0,
0,
True,
[],
[],
),
],
ids=[
"Test small dataframe update with hardcoded entity",
"Test larger dataframe update with hardcoded entity",
"Test larger dataframe update with hardcoded entity and feature",
"Test larger dataframe update with two hardcoded entities and discarding of existing fields",
"Test larger dataframe update with two hardcoded entities and retention of existing fields",
"Test dataframe with no featuresdataframe",
"Test empty dataframe",
],
)
def test_add_features_from_df_success(
self,
dataframe,
feature_count,
entity_count,
discard_unused_fields,
features,
entities,
):
my_feature_set = FeatureSet(
name="my_feature_set",
features=[Feature(name="dummy_f1", dtype=ValueType.INT64)],
entities=[Entity(name="dummy_entity_1", dtype=ValueType.INT64)],
)
my_feature_set.infer_fields_from_df(
dataframe,
discard_unused_fields=discard_unused_fields,
features=features,
entities=entities,
)
assert len(my_feature_set.features) == feature_count
assert len(my_feature_set.entities) == entity_count
def test_import_tfx_schema(self):
tests_folder = pathlib.Path(__file__).parent
test_input_schema_json = open(
tests_folder / "data" / "tensorflow_metadata" / "bikeshare_schema.json"
).read()
test_input_schema = schema_pb2.Schema()
json_format.Parse(test_input_schema_json, test_input_schema)
feature_set = FeatureSet(
name="bikeshare",
entities=[Entity(name="station_id", dtype=ValueType.INT64)],
features=[
Feature(name="name", dtype=ValueType.STRING),
Feature(name="status", dtype=ValueType.STRING),
Feature(name="latitude", dtype=ValueType.FLOAT),
Feature(name="longitude", dtype=ValueType.FLOAT),
Feature(name="location", dtype=ValueType.STRING),
],
)
# Before update
for entity in feature_set.entities:
assert entity.presence is None
assert entity.shape is None
for feature in feature_set.features:
assert feature.presence is None
assert feature.shape is None
assert feature.string_domain is None
assert feature.float_domain is None
assert feature.int_domain is None
feature_set.import_tfx_schema(test_input_schema)
# After update
for feature in feature_set.features:
assert feature.presence is not None
assert feature.shape is not None
if feature.name in ["location", "name", "status"]:
assert feature.string_domain is not None
elif feature.name in ["latitude", "longitude"]:
assert feature.float_domain is not None
elif feature.name in ["station_id"]:
assert feature.int_domain is not None
def test_export_tfx_schema(self):
tests_folder = pathlib.Path(__file__).parent
test_input_feature_set = FeatureSet.from_yaml(
str(
tests_folder
/ "data"
/ "tensorflow_metadata"
/ "bikeshare_feature_set.yaml"
)
)
expected_schema_json = open(
tests_folder / "data" / "tensorflow_metadata" / "bikeshare_schema.json"
).read()
expected_schema = schema_pb2.Schema()
json_format.Parse(expected_schema_json, expected_schema)
_make_tfx_schema_domain_info_inline(expected_schema)
actual_schema = test_input_feature_set.export_tfx_schema()
assert len(actual_schema.feature) == len(expected_schema.feature)
for actual, expected in zip(actual_schema.feature, expected_schema.feature):
assert actual.SerializeToString() == expected.SerializeToString()
def make_tfx_schema_domain_info_inline(schema):
# Copy top-level domain info defined in the schema to inline definition.
# One use case is in FeatureSet which does not have access to the top-level domain
# info.
domain_ref_to_string_domain = {d.name: d for d in schema.string_domain}
domain_ref_to_float_domain = {d.name: d for d in schema.float_domain}
domain_ref_to_int_domain = {d.name: d for d in schema.int_domain}
for feature in schema.feature:
domain_info_case = feature.WhichOneof("domain_info")
if domain_info_case == "domain":
domain_ref = feature.domain
if domain_ref in domain_ref_to_string_domain:
feature.string_domain.MergeFrom(domain_ref_to_string_domain[domain_ref])
elif domain_ref in domain_ref_to_float_domain:
feature.float_domain.MergeFrom(domain_ref_to_float_domain[domain_ref])
elif domain_ref in domain_ref_to_int_domain:
feature.int_domain.MergeFrom(domain_ref_to_int_domain[domain_ref])
class TestFeatureSetRef:
def test_from_feature_set(self):
feature_set = FeatureSet("test", "test")
ref = FeatureSetRef.from_feature_set(feature_set)
assert ref.name == "test"
assert ref.project == "test"
def test_str_ref(self):
original_ref = FeatureSetRef(project="test", name="test")
ref_str = repr(original_ref)
parsed_ref = FeatureSetRef.from_str(ref_str)
assert original_ref == parsed_ref
|
the-stack_106_29377 | # Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import numpy as np
import sys,json,os
import numpy.random as rnd
try:
import itimer as it
now = it.itime
get_mops = it.itime_mops_now
except:
from timeit import default_timer
now = default_timer
get_mops = lambda t0, t1, n: (n / (t1 - t0),t1-t0)
######################################################
# GLOBAL DECLARATIONS THAT WILL BE USED IN ALL FILES #
######################################################
SEED = 7777777
# make xrange available in python 3
try:
xrange
except NameError:
xrange = range
###############################################
def get_device_selector (is_gpu = True):
if is_gpu is True:
device_selector = "gpu"
else:
device_selector = "cpu"
if os.environ.get('SYCL_DEVICE_FILTER') is None or os.environ.get('SYCL_DEVICE_FILTER') == "opencl":
return "opencl:" + device_selector
if os.environ.get('SYCL_DEVICE_FILTER') == "level_zero":
return "level_zero:" + device_selector
return os.environ.get('SYCL_DEVICE_FILTER')
def gen_data(nopt,dims):
return (
rnd.random((nopt, dims)),
rnd.random((nopt, dims)),
np.empty((nopt, nopt))
)
##############################################
def run(name, alg, sizes=5, step=2, nopt=2**10):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--steps', required=False, default=sizes, help="Number of steps")
parser.add_argument('--step', required=False, default=step, help="Factor for each step")
parser.add_argument('--size', required=False, default=nopt, help="Initial data size")
parser.add_argument('--repeat',required=False, default=1, help="Iterations inside measured region")
parser.add_argument('--text', required=False, default="", help="Print with each result")
parser.add_argument('-d', type=int, default=3, help='Dimensions')
parser.add_argument('--json', required=False, default=__file__.replace('py','json'), help="output json data filename")
args = parser.parse_args()
sizes= int(args.steps)
step = int(args.step)
nopt = int(args.size)
repeat=int(args.repeat)
dims = int(args.d)
output = {}
output['name'] = name
output['sizes'] = sizes
output['step'] = step
output['repeat'] = repeat
output['randseed'] = SEED
output['metrics'] = []
rnd.seed(SEED)
f=open("perf_output.csv",'w',1)
f2 = open("runtimes.csv",'w',1)
for i in xrange(sizes):
X,Y,D = gen_data(nopt,dims)
iterations = xrange(repeat)
alg(X,Y,D) #warmup
t0 = now()
for _ in iterations:
alg(X,Y,D)
mops,time = get_mops(t0, now(), nopt)
f.write(str(nopt) + "," + str(mops*2*repeat) + "\n")
f2.write(str(nopt) + "," + str(time) + "\n")
print("ERF: {:15s} | Size: {:10d} | MOPS: {:15.2f} | TIME: {:10.6f}".format(name, nopt, mops*repeat,time),flush=True)
output['metrics'].append((nopt,mops,time))
nopt *= step
repeat -= step
if repeat < 1:
repeat = 1
json.dump(output,open(args.json,'w'),indent=2, sort_keys=True)
f.close()
f2.close()
|
the-stack_106_29378 | #
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
from mlos.Exceptions import PointOutOfDomainException
from mlos.Spaces.Dimensions.Dimension import Dimension
from mlos.Spaces.Hypergrid import Hypergrid
from mlos.Spaces.Point import Point
class SimpleHypergrid(Hypergrid):
""" Models a space comprized of Continuous, Discrete, Ordinal and Categorical Dimensions.
Can be flat or hierarchical, depending if any join operations were performed.
"""
class JoinedSubgrid:
""" Allows a subgrid to be joined on a dimension that's not in that subgrid.
Think how in SQL you can do:
SELECT * FROM Employee JOIN Employer ON Employee.Name = 'John';
The JOIN predicate does not reference any column in Employer so we end up with a cross product of all John's with
all employers.
That's kinda the idea here. We want to join a subgrid on an arbitrary predicate expressed by that join_dimension.
"""
def __init__(self, subgrid, join_dimension):
self.subgrid = subgrid
self.join_dimension = join_dimension
def to_string(self, indent=0):
""" Returns it's own string representation.
:param indent:
:return:
"""
indent_str = ' ' * indent
return f"\n{indent_str}IF {self.join_dimension.name} IN {self.join_dimension.to_string(include_name=False)} THEN (" \
f"\n{self.subgrid.to_string(indent=indent+2)}" \
f"\n{indent_str})"
def __init__(self, name, dimensions=None, random_state=None):
Hypergrid.__init__(self, name=name, random_state=random_state)
self._dimensions = []
self.dimensions_dict = dict()
if dimensions is None:
dimensions = []
for dimension in dimensions:
self.add_dimension(dimension)
# maps a pivot dimension name to a set of guest subgrids that are joined on that external dimension
#
self.joined_subgrids_by_pivot_dimension = dict()
# maps a subgrid name to the subgrid
#
self.subgrids_by_name = dict()
def is_hierarchical(self):
return len(self.subgrids_by_name) > 0
def add_dimension(self, dimension):
assert isinstance(dimension, Dimension)
assert dimension.name not in self.dimensions_dict
dimension.random_state = self.random_state
self.dimensions_dict[dimension.name] = dimension
self._dimensions.append(dimension)
@property
def random_state(self):
return self._random_state
@random_state.setter
def random_state(self, value):
self._random_state = value
for dimension in self._dimensions:
dimension.random_state = self._random_state
for _, subgrid in self.subgrids_by_name.items():
subgrid.random_state = self.random_state
def __getitem__(self, dimension_or_subgrid_name):
subgrid_name, name_without_subgrid_name = Dimension.split_dimension_name(dimension_or_subgrid_name)
if subgrid_name is None:
if name_without_subgrid_name in self.dimensions_dict.keys():
return self.dimensions_dict[dimension_or_subgrid_name]
if name_without_subgrid_name in self.subgrids_by_name.keys():
return self.subgrids_by_name[name_without_subgrid_name]
raise KeyError(f"{dimension_or_subgrid_name} does not match any dimension names nor any subgrid names.")
subgrid = self.subgrids_by_name[subgrid_name]
return subgrid[name_without_subgrid_name]
def get(self, dimension_or_subgrid_name, default=None):
try:
return self[dimension_or_subgrid_name]
except KeyError:
return default
def __repr__(self):
return f"{self.to_string(indent=2)}"
def to_string(self, indent=0):
indent_str = ' ' * indent
dimensions_indent_str = ' ' * (indent+2)
root_grid_header = f"{indent_str}Name: {self.name}\n" \
f"{indent_str}Dimensions:\n"
root_dimension_strings = []
for dimension in self._dimensions:
root_dimension_strings.append(f"{dimensions_indent_str}{dimension}")
root_grid_string = root_grid_header + "\n".join(root_dimension_strings)
if self.is_hierarchical():
root_grid_string += "\n"
subgrid_strings = []
for _, joined_subgrids in self.joined_subgrids_by_pivot_dimension.items():
for joined_subgrid in joined_subgrids:
subgrid_strings.append(joined_subgrid.to_string(indent=indent))
subgrid_string = "\n".join(subgrid_strings)
return root_grid_string + subgrid_string
def add_subgrid_on_external_dimension(self, other_hypergrid: Hypergrid, external_dimension: Dimension):
assert external_dimension.name in self.dimensions_dict, f"{self.name} does not contain dimension {external_dimension.name}"
assert other_hypergrid.name not in self.dimensions_dict.keys(), f"{other_hypergrid.name} collides with a dimension name."
if not self[external_dimension.name].intersects(external_dimension):
# They don't intersect so nothing to do
return
guest_subgrids_joined_on_dimension = self.joined_subgrids_by_pivot_dimension.get(external_dimension.name, set())
if any(guest_subgrid.subgrid.name == other_hypergrid.name for guest_subgrid in guest_subgrids_joined_on_dimension):
raise RuntimeError(f"Subgrid {other_hypergrid.name} already joined to hypergrid {self.name} along the dimension {external_dimension.name}.")
other_hypergrid.random_state = self.random_state
guest_subgrids_joined_on_dimension.add(SimpleHypergrid.JoinedSubgrid(subgrid=other_hypergrid, join_dimension=external_dimension))
self.joined_subgrids_by_pivot_dimension[external_dimension.name] = guest_subgrids_joined_on_dimension
self.subgrids_by_name[other_hypergrid.name] = other_hypergrid
def __contains__(self, item):
if isinstance(item, Point):
return self.contains_point(point=item)
raise NotImplementedError
def join(self, subgrid: Hypergrid, on_external_dimension: Dimension):
""" Joins the subgrid on the specified dimension.
:param subgrid:
:param on_external_dimension:
:return:
"""
if subgrid is None:
return self
assert on_external_dimension is not None
if subgrid.name in self.dimensions_dict.keys():
raise ValueError(f"{subgrid.name} collides with a dimension name.")
external_dimension = on_external_dimension
join_dimension_name = external_dimension.name
subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(join_dimension_name)
if subgrid_name is None:
self.add_subgrid_on_external_dimension(other_hypergrid=subgrid, external_dimension=external_dimension)
else:
existing_subgrid = self.subgrids_by_name.get(subgrid_name, None)
assert existing_subgrid is not None
external_dimension = external_dimension.copy()
external_dimension.name = dimension_name_without_subgrid_name
self.subgrids_by_name[subgrid_name] = existing_subgrid.join(
subgrid=subgrid,
on_external_dimension=external_dimension
)
return self
def contains_point(self, point: Point):
""" Checks if point belongs to the hypergrid.
We must first see if for every dimension of the root hypergrid, the Point:
a) specifies the dimension
b) the value along that dimension is within bounds
Then for every pivotal dimension present in the point we must:
a) find the corresponding subgrid that might have been joined
b) check if the value along pivotal dimension belongs to that subgrid
            c) if b) is true, then for every dimension in the subgrid, check if the point's dimension
values are within bounds.
This has to be recursive, because any of the subgrids can be hierarchical already.
:param point:
:return:
"""
if not all(point.get(dimension.name) is not None and point.get(dimension.name) in dimension for dimension in self._dimensions):
return False
for external_dimension_name, guest_subgrids_joined_on_dimension in self.joined_subgrids_by_pivot_dimension.items():
for guest_subgrid in guest_subgrids_joined_on_dimension:
if point[external_dimension_name] in guest_subgrid.join_dimension:
# We need to check if the sub_point belongs to the sub_grid
#
subgrid = guest_subgrid.subgrid
if subgrid.name not in point or point[subgrid.name] not in subgrid:
return False
return True
def contains_space(self, other_space):
""" Checks if other_space is a subspace of this one.
For another space to be a subspace of this one:
1. all of the other_space.dimensions must be in self.dimensions
2. every dimension in other_space.dimensions must be contained by the corresponding dimension in this space.
However the complication arises for hierarchical hypergrids so we'll tackle this more complex problem down the road.
:param other_space:
:return:
"""
if self.is_hierarchical() or other_space.is_hierarchical():
raise NotImplementedError
for other_dimension in other_space.dimensions:
our_dimension = self.dimensions_dict.get(other_dimension.name, None)
if our_dimension is None:
return False
if other_dimension not in our_dimension:
return False
return True
def random(self, point=None):
if point is None:
point = Point()
for dimension in self._dimensions:
if dimension.name not in point:
point[dimension.name] = dimension.random()
for external_dimension_name, guest_subgrids_joined_on_dimension in self.joined_subgrids_by_pivot_dimension.items():
for joined_subgrid in guest_subgrids_joined_on_dimension:
if point[external_dimension_name] in joined_subgrid.join_dimension:
sub_point = joined_subgrid.subgrid.random()
point[joined_subgrid.subgrid.name] = sub_point
return point
@property
def dimensions(self):
dimensions = []
for dimension in self._dimensions:
dimensions.append(dimension)
for subgrid_name, subgrid in self.subgrids_by_name.items():
for dimension in subgrid.dimensions:
returned_dimension = dimension.copy()
returned_dimension.name = subgrid_name + "." + returned_dimension.name
dimensions.append(returned_dimension)
return dimensions
@property
def root_dimensions(self):
return self._dimensions
def get_dimensions_for_point(self, point, return_join_dimensions=True):
""" Returns dimensions that the given point belongs to.
For join dimensions, it can return the joined_subgrid.join_dimension if return_join_dimensions == True,
else it returns the original dimension.
        In a hierarchical hypergrid, coordinates of a point in the root hypergrid determine which of the subgrids will be 'activated' (meaningful). For example
if point.base_boosting_regression_model_name == "LassoRegression" then the subgrid describing the configuration for Lasso Regression becomes 'activated'
that is to say, specifying parameters for Lasso Regression becomes meaningful. If point.base_boosting_regression_model_name == "RidgeRegression", we can
still specify the Lasso Regression parameters, but they would never be consumed (by the smart component) so are meaningless and effectively noise.
:param point:
:return:
"""
if point not in self:
raise PointOutOfDomainException(f"Point {point} does not belong to {self}.")
dimensions_by_name = {dimension.name: dimension for dimension in self._dimensions}
ordered_dimension_names = [dimension.name for dimension in self._dimensions]
for external_dimension_name, guest_subgrids_joined_on_dimension in self.joined_subgrids_by_pivot_dimension.items():
for joined_subgrid in guest_subgrids_joined_on_dimension:
if point[external_dimension_name] in joined_subgrid.join_dimension:
# We return this narrower join dimension, since point[join_dimension_name] has
# to belong to the join_dimension for all of the subgrid dimensions to make sense.
#
if return_join_dimensions:
dimensions_by_name[external_dimension_name] = joined_subgrid.join_dimension
subgrid = joined_subgrid.subgrid
for dimension in subgrid.get_dimensions_for_point(point[subgrid.name], return_join_dimensions=return_join_dimensions):
dimension = dimension.copy()
dimension.name = f"{subgrid.name}.{dimension.name}"
dimensions_by_name[dimension.name] = dimension
ordered_dimension_names.append(dimension.name)
        # Returning dimensions in order they were visited (mostly to make sure that root dimension names come first).
#
return [dimensions_by_name[name] for name in ordered_dimension_names]
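# Usage sketch (added for illustration, kept as a comment; CategoricalDimension and
# ContinuousDimension are assumed to come from mlos.Spaces.Dimensions):
#
#   root = SimpleHypergrid(
#       name="model_config",
#       dimensions=[CategoricalDimension(name="model_name", values=["lasso", "ridge"])],
#   ).join(
#       subgrid=SimpleHypergrid(
#           name="lasso_config",
#           dimensions=[ContinuousDimension(name="alpha", min=0.0, max=1.0)],
#       ),
#       on_external_dimension=CategoricalDimension(name="model_name", values=["lasso"]),
#   )
#   point = root.random()   # includes a lasso_config sub-point only when model_name == "lasso"
#   assert point in root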
|
the-stack_106_29380 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import art
# -- Project information -----------------------------------------------------
project = 'Adversarial Robustness Toolbox'
copyright = '2018, The Adversarial Robustness Toolbox (ART) Authors'
author = 'Maria-Irina Nicolae'
# The short X.Y version
version = '1.6'
# The full version, including alpha/beta/rc tags
release = '1.6.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.viewcode',
'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
if os.environ.get('READTHEDOCS') != 'True':
try:
import sphinx_rtd_theme
except ImportError:
pass # assume we have sphinx >= 1.3
else:
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'adversarial-robustness-toolboxdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'adversarial-robustness-toolbox.tex', 'adversarial-robustness-toolbox Documentation',
'Maria-Irina Nicolae', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'adversarial-robustness-toolbox', 'adversarial-robustness-toolbox Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'adversarial-robustness-toolbox', 'adversarial-robustness-toolbox Documentation',
author, 'adversarial-robustness-toolbox', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
extensions = [
"sphinx.ext.autodoc",
"sphinx_autodoc_annotation",
]
|
the-stack_106_29382 | #!/usr/bin/env python
# encoding: utf-8
from typing import List
class Solution:
def maxArea(self, height: List[int]) -> int:
n = len(height)
ans, left, right = float('-inf'), 0, n-1
while left < right:
if height[left] < height[right]:
ans = max(ans, height[left] * (right - left))
left += 1
else:
ans = max(ans, height[right] * (right - left))
right -= 1
return ans
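# Quick check (added for illustration): the classic container-with-most-water example.
if __name__ == '__main__':
    assert Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]) == 49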
|
the-stack_106_29383 | import numpy as np
import cv2, PIL
from cv2 import aruco
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
#matplotlib nbagg
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
frame = cv2.imread("tmp.png")
#plt.figure()
#plt.imshow(frame)
#plt.show()
## time
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
#plt.figure()
#plt.imshow(frame_markers)
with open('readme.txt', 'w') as f:
for i in range(len(ids)):
c = corners[i][0]
for j in range(0, 4):
f.write(str(c[j,0]))
f.write(', ')
f.write(str(c[j,1]))
f.write('\n')
#plt.plot([c[:, 0].mean()], [c[:, 1].mean()], "o", label = "id={0}".format(ids[i]))
#plt.legend()
#plt.show()
|
the-stack_106_29384 | import argparse
import logging
import spotipy
from spotipy.oauth2 import SpotifyOAuth
logger = logging.getLogger('examples.add_a_saved_album')
logging.basicConfig(level='DEBUG')
scope = 'user-library-modify'
def get_args():
parser = argparse.ArgumentParser(description='Creates a playlist for user')
parser.add_argument('-a', '--aids', action='append',
required=True, help='Album ids')
return parser.parse_args()
def main():
args = get_args()
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
sp.current_user_saved_albums_add(albums=args.aids)
if __name__ == '__main__':
main()
|
the-stack_106_29385 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Dict, Mapping, Optional, Sequence, Type, Union
import pytorch_lightning as pl
import torch
from flash.text.seq2seq.core.model import Seq2SeqTask
from flash.text.seq2seq.summarization.metric import RougeMetric
class SummarizationTask(Seq2SeqTask):
"""Task for Seq2Seq Summarization.
Args:
backbone: backbone model to use for the task.
loss_fn: Loss function for training.
optimizer: Optimizer to use for training, defaults to `torch.optim.Adam`.
metrics: Metrics to compute for training and evaluation.
learning_rate: Learning rate to use for training, defaults to `3e-4`
val_target_max_length: Maximum length of targets in validation. Defaults to `128`
num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`
use_stemmer: Whether Porter stemmer should be used to strip word suffixes to improve matching.
rouge_newline_sep: Add a new line at the beginning of each sentence in Rouge Metric calculation.
"""
def __init__(
self,
backbone: str = "sshleifer/tiny-mbart",
loss_fn: Optional[Union[Callable, Mapping, Sequence]] = None,
optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,
metrics: Union[pl.metrics.Metric, Mapping, Sequence, None] = None,
learning_rate: float = 5e-5,
val_target_max_length: Optional[int] = None,
num_beams: Optional[int] = 4,
use_stemmer: bool = True,
rouge_newline_sep: bool = True
):
self.save_hyperparameters()
super().__init__(
backbone=backbone,
loss_fn=loss_fn,
optimizer=optimizer,
metrics=metrics,
learning_rate=learning_rate,
val_target_max_length=val_target_max_length,
num_beams=num_beams
)
self.rouge = RougeMetric(
rouge_newline_sep=rouge_newline_sep,
use_stemmer=use_stemmer,
)
@property
def task(self) -> str:
return "summarization"
def compute_metrics(self, generated_tokens: torch.Tensor, batch: Dict, prefix: str) -> None:
tgt_lns = self.tokenize_labels(batch["labels"])
result = self.rouge(self._postprocess.uncollate(generated_tokens), tgt_lns)
self.log_dict(result, on_step=False, on_epoch=True)
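# Minimal construction sketch (illustrative only; the tiny mbart backbone is
# simply the class default above, and data/trainer setup is out of scope here):
#
#   task = SummarizationTask(backbone="sshleifer/tiny-mbart", num_beams=4)
#   assert task.task == "summarization"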
|
the-stack_106_29386 | import glob
import os
from print_utils import *
def get_mappings(root_dir, files, annot_name):
pairs = []
for f in files:
f = f.replace(root_dir, '/')
img_f = f.replace(annot_name, 'leftImg8bit')
img_f = img_f.replace('_labelTrainIds.png', '.png')
if not os.path.isfile(root_dir + img_f):
print_error_message('{} file does not exist. Please check'.format(root_dir + img_f))
exit()
line = img_f + ',' + f
pairs.append(line)
return pairs
def main(cityscapesPath, split):
searchFine = os.path.join(cityscapesPath, "gtFine", split, "*", '*_labelTrainIds.png')
filesFine = glob.glob(searchFine)
filesFine.sort()
if not filesFine:
print_warning_message("Did not find any files. Please check root directory: {}.".format(cityscapesPath))
fine_pairs = []
else:
print_info_message('{} files found for {} split'.format(len(filesFine), split))
fine_pairs = get_mappings(cityscapesPath, filesFine, 'gtFine')
if not fine_pairs:
print_error_message('No pair exist. Exiting')
exit()
else:
print_info_message('Creating train and val files.')
f_name = split + '.txt'
with open(os.path.join(cityscapesPath, f_name), 'w') as txtFile:
for pair in fine_pairs:
txtFile.write(pair + '\n')
print_info_message('{} created in {} with {} pairs'.format(f_name, cityscapesPath, len(fine_pairs)))
if split == 'train':
split_orig = split
split = split + '_extra'
searchCoarse = os.path.join(cityscapesPath, "gtCoarse", split, "*", '*_labelTrainIds.png')
filesCoarse = glob.glob(searchCoarse)
filesCoarse.sort()
if not filesCoarse:
print_warning_message("Did not find any files. Please check root directory: {}.".format(cityscapesPath))
course_pairs = []
else:
print_info_message('{} files found for {} split'.format(len(filesCoarse), split))
course_pairs = get_mappings(cityscapesPath, filesCoarse, 'gtCoarse')
if not course_pairs:
print_warning_message('No pair exist for coarse data')
return
else:
print_info_message('Creating train and val files.')
f_name = split_orig + '_coarse.txt'
with open(os.path.join(cityscapesPath, f_name), 'w') as txtFile:
for pair in course_pairs:
txtFile.write(pair + '\n')
print_info_message('{} created in {} with {} pairs'.format(f_name, cityscapesPath, len(course_pairs)))
if __name__ == '__main__':
cityscapes_path = '../../../vision_datasets/cityscapes/'
main(cityscapes_path, "train")
main(cityscapes_path, "val") |
the-stack_106_29387 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Unitary gate."""
import numpy as np
from scipy.linalg import schur
from qiskit.circuit.exceptions import CircuitError
from .instruction import Instruction
class Gate(Instruction):
"""Unitary gate."""
def __init__(self, name, num_qubits, params, label=None):
"""Create a new gate.
Args:
name (str): the Qobj name of the gate
num_qubits (int): the number of qubits the gate acts on.
params (list): a list of parameters.
label (str or None): An optional label for the gate [Default: None]
"""
self._label = label
self.definition = None
super().__init__(name, num_qubits, 0, params)
def to_matrix(self):
"""Return a Numpy.array for the gate unitary matrix.
Raises:
CircuitError: If a Gate subclass does not implement this method an
exception will be raised when this base class method is called.
"""
raise CircuitError("to_matrix not defined for this {}".format(type(self)))
def power(self, exponent):
"""Creates a unitary gate as `gate^exponent`.
Args:
exponent (float): Gate^exponent
Returns:
UnitaryGate: To which `to_matrix` is self.to_matrix^exponent.
Raises:
CircuitError: If Gate is not unitary
"""
from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import
# Should be diagonalized because it's a unitary.
decomposition, unitary = schur(self.to_matrix(), output='complex')
# Raise the diagonal entries to the specified power
decomposition_power = list()
decomposition_diagonal = decomposition.diagonal()
# assert off-diagonal are 0
if not np.allclose(np.diag(decomposition_diagonal), decomposition):
raise CircuitError('The matrix is not diagonal')
for element in decomposition_diagonal:
decomposition_power.append(pow(element, exponent))
# Then reconstruct the resulting gate.
unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T
return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent))
def _return_repeat(self, exponent):
return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits,
params=self.params)
def assemble(self):
"""Assemble a QasmQobjInstruction"""
instruction = super().assemble()
if self.label:
instruction.label = self.label
return instruction
@property
def label(self):
"""Return gate label"""
return self._label
@label.setter
def label(self, name):
"""Set gate label to name
Args:
name (str or None): label to assign unitary
Raises:
TypeError: name is not string or None.
"""
if isinstance(name, (str, type(None))):
self._label = name
else:
raise TypeError('label expects a string or None')
def q_if(self, num_ctrl_qubits=1, label=None):
"""Return controlled version of gate
Args:
num_ctrl_qubits (int): number of controls to add to gate (default=1)
label (str): optional gate label
Returns:
ControlledGate: controlled version of gate. This default algorithm
uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size
num_qubits + 2*num_ctrl_qubits - 1.
Raises:
QiskitError: unrecognized mode
"""
# pylint: disable=cyclic-import
from .add_control import add_control
return add_control(self, num_ctrl_qubits, label)
@staticmethod
def _broadcast_single_argument(qarg):
"""Expands a single argument.
For example: [q[0], q[1]] -> [q[0]], [q[1]]
"""
# [q[0], q[1]] -> [q[0]]
# -> [q[1]]
for arg0 in qarg:
yield [arg0], []
@staticmethod
def _broadcast_2_arguments(qarg0, qarg1):
if len(qarg0) == len(qarg1):
# [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]]
# -> [q[1], r[1]]
for arg0, arg1 in zip(qarg0, qarg1):
yield [arg0, arg1], []
elif len(qarg0) == 1:
# [[q[0]], [r[0], r[1]]] -> [q[0], r[0]]
# -> [q[0], r[1]]
for arg1 in qarg1:
yield [qarg0[0], arg1], []
elif len(qarg1) == 1:
# [[q[0], q[1]], [r[0]]] -> [q[0], r[0]]
# -> [q[1], r[0]]
for arg0 in qarg0:
yield [arg0, qarg1[0]], []
else:
raise CircuitError('Not sure how to combine these two qubit arguments:\n %s\n %s' %
(qarg0, qarg1))
@staticmethod
def _broadcast_3_or_more_args(qargs):
if all(len(qarg) == len(qargs[0]) for qarg in qargs):
for arg in zip(*qargs):
yield list(arg), []
else:
raise CircuitError(
'Not sure how to combine these qubit arguments:\n %s\n' % qargs)
def broadcast_arguments(self, qargs, cargs):
"""Validation and handling of the arguments and its relationship.
For example:
`cx([q[0],q[1]], q[2])` means `cx(q[0], q[2]); cx(q[1], q[2])`. This method
yields the arguments in the right grouping. In the given example::
in: [[q[0],q[1]], q[2]],[]
outs: [q[0], q[2]], []
[q[1], q[2]], []
The general broadcasting rules are:
* If len(qargs) == 1::
[q[0], q[1]] -> [q[0]],[q[1]]
* If len(qargs) == 2::
[[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]]
[[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]]
[[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]]
* If len(qargs) >= 3::
[q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...]
Args:
qargs (List): List of quantum bit arguments.
cargs (List): List of classical bit arguments.
Returns:
Tuple(List, List): A tuple with single arguments.
Raises:
CircuitError: If the input is not valid. For example, the number of
arguments does not match the gate expectation.
"""
if len(qargs) != self.num_qubits or cargs:
raise CircuitError(
'The amount of qubit/clbit arguments does not match the gate expectation.')
if any([not qarg for qarg in qargs]):
raise CircuitError('One or more of the arguments are empty')
if len(qargs) == 1:
return Gate._broadcast_single_argument(qargs[0])
elif len(qargs) == 2:
return Gate._broadcast_2_arguments(qargs[0], qargs[1])
elif len(qargs) >= 3:
return Gate._broadcast_3_or_more_args(qargs)
else:
raise CircuitError('This gate cannot handle %i arguments' % len(qargs))
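# Usage sketch (hypothetical subclass): any Gate subclass that implements
# to_matrix() gets fractional powers via the Schur decomposition above, e.g.
#   sqrt_gate = MyGate().power(0.5)   # UnitaryGate implementing MyGate^(1/2)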
|
the-stack_106_29388 | from django.db import models
from app.validators import ingredients_validator
# Create your models here.
class Recipe(models.Model):
title = models.CharField(max_length=30)
image_url = models.URLField()
description = models.TextField()
ingredients = models.CharField(
max_length=250,
validators=(
ingredients_validator,
)
)
time = models.IntegerField()
def __str__(self):
return f'{self.title}'
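# Usage sketch (assumes migrations for this app have been applied; all values
# below are illustrative placeholders):
#
#   Recipe.objects.create(
#       title='Banitsa',
#       image_url='https://example.com/banitsa.jpg',
#       description='Bulgarian filo pastry with cheese.',
#       ingredients='filo, eggs, cheese, yoghurt',
#       time=60,
#   )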
|
the-stack_106_29390 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""BLEURT's Tensorflow ops."""
from bleurt import checkpoint as checkpoint_lib
from bleurt.lib import optimization
import numpy as np
from scipy import stats
import tensorflow.compat.v1 as tf
from tf_slim import metrics
from bleurt.lib import modeling
flags = tf.flags
logging = tf.logging
FLAGS = flags.FLAGS
# BLEURT flags.
flags.DEFINE_string("bleurt_checkpoint_name", "bert_custom",
"Name of the BLEURT export to be created.")
flags.DEFINE_string("init_bleurt_checkpoint", None,
"Existing BLEURT export to be fine-tuned.")
# BERT flags.
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_integer(
"max_seq_length", None,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("dynamic_seq_length", False,
"Exports model with dymaic sequence length.")
# Flags to control training setup.
flags.DEFINE_enum("export_metric", "kendalltau", ["correlation", "kendalltau"],
"Metric to chose the best model in export functions.")
flags.DEFINE_integer("shuffle_buffer_size", 500,
"Size of buffer used to shuffle the examples.")
# Flags to control optimization.
flags.DEFINE_enum("optimizer", "adam", ["adam", "sgd", "adagrad"],
"Which optimizer to use.")
flags.DEFINE_float("learning_rate", 1e-5, "The initial learning rate for Adam.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
# BLEURT model flags.
flags.DEFINE_integer("n_hidden_layers", 0,
"Number of fully connected/RNN layers before prediction.")
flags.DEFINE_integer("hidden_layers_width", 128, "Width of hidden layers.")
flags.DEFINE_float("dropout_rate", 0,
"Probability of dropout over BERT embedding.")
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, use_one_hot_embeddings, n_hidden_layers,
hidden_layers_width, dropout_rate):
"""Creates a regression model, loosely adapted from language/bert.
Args:
bert_config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: int32 Tensor of shape [batch_size, seq_length].
segment_ids: int32 Tensor of shape [batch_size, seq_length].
labels: float32 Tensor of shape [batch_size].
use_one_hot_embeddings: Whether to use one-hot word embeddings or
tf.embedding_lookup() for the word embeddings.
n_hidden_layers: number of FC layers before prediction.
hidden_layers_width: width of FC layers.
dropout_rate: probability of dropout over BERT embedding.
Returns:
loss: <float32>[]
per_example_loss: <float32>[batch_size]
pred: <float32>[batch_size]
"""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# <float>[batch_size, hidden_size]
output_layer = model.get_pooled_output()
bert_embed_size = output_layer.shape[-1]
logging.info("BERT embedding width: {}".format(str(bert_embed_size)))
if is_training and dropout_rate > 0:
# Implements dropout on top of BERT's pooled output.
# <float32>[batch_size, hidden_size]
output_layer = tf.nn.dropout(output_layer, rate=dropout_rate)
# Hidden layers
for i in range(n_hidden_layers):
# <float32>[batch_size, hidden_layers_width]
logging.info("Adding hidden layer {}".format(i + 1))
output_layer = tf.layers.dense(
output_layer, hidden_layers_width, activation=tf.nn.relu)
logging.info("Building linear output...")
# <float32>[batch_size,1]
predictions = tf.layers.dense(
output_layer, 1, bias_initializer=tf.constant_initializer(0.15))
# <float32>[batch_size]
predictions = tf.squeeze(predictions, 1)
# <float32>[batch_size]
per_example_loss = tf.pow(predictions - labels, 2)
# <float32> []
loss = tf.reduce_mean(per_example_loss, axis=-1)
return (loss, per_example_loss, predictions)
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, n_hidden_layers,
hidden_layers_width, dropout_rate):
"""Returns `model_fn` closure."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for Estimator/TPUEstimator."""
logging.info("*** Building Regression BERT Model ***")
tf.set_random_seed(55555)
logging.info("*** Features ***")
for name in sorted(features.keys()):
logging.info(" name = %s, shape = %s", name, features[name].shape)
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
if mode != tf.estimator.ModeKeys.PREDICT:
scores = features["score"]
else:
scores = tf.zeros(tf.shape(input_ids)[0])
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
total_loss, per_example_loss, pred = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, scores,
use_one_hot_embeddings, n_hidden_layers, hidden_layers_width,
dropout_rate)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
# Loads pretrained model
logging.info("**** Initializing from {} ****".format(init_checkpoint))
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
train_op = optimization.create_optimizer(total_loss, learning_rate,
num_train_steps,
num_warmup_steps, use_tpu)
if use_tpu:
output_spec = tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
if use_tpu:
eval_metrics = (metric_fn, [per_example_loss, pred, scores])
output_spec = tf.estimator.TPUEstimatorSpec(
mode=mode, loss=total_loss, eval_metric=eval_metrics)
else:
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=metric_fn(per_example_loss, pred, scores))
elif mode == tf.estimator.ModeKeys.PREDICT:
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions={"predictions": pred})
return output_spec
return model_fn
# TF ops to compute the metrics.
def concat_tensors(predictions, ratings, sources=None):
"""Concatenates batches of ratings and predictions."""
concat_predictions_value, concat_predictions_update = \
metrics.streaming_concat(predictions)
concat_labels_value, concat_labels_update = \
metrics.streaming_concat(ratings)
if sources is None:
return concat_predictions_value, concat_labels_value, \
tf.group(concat_predictions_update, concat_labels_update)
concat_sources_value, concat_sources_update = \
metrics.streaming_concat(sources)
return concat_predictions_value, concat_labels_value, concat_sources_value, \
tf.group(concat_predictions_update, concat_labels_update,
concat_sources_update)
def kendall_tau_metric(predictions, ratings, weights=None):
"""Builds the computation graph for Kendall Tau metric."""
def _kendall_tau(x, y):
tau = stats.kendalltau(x, y)[0]
return np.array(tau).astype(np.float32)
if weights is not None:
predictions = tf.boolean_mask(predictions, weights)
ratings = tf.boolean_mask(ratings, weights)
with tf.variable_scope("kendall_tau"):
concat_predictions_value, concat_labels_value, update_op = (
concat_tensors(predictions, ratings))
metric_value = tf.reshape(
tf.numpy_function(_kendall_tau,
[concat_predictions_value, concat_labels_value],
tf.float32),
shape=[])
return metric_value, update_op
def metric_fn(per_example_loss, pred, ratings):
"""Metrics for BLEURT experiments."""
# Mean of predictions
mean_pred = tf.metrics.mean(values=pred)
# Standard deviation of predictions
mean = tf.reduce_mean(pred)
diffs = tf.sqrt(tf.pow(pred - mean, 2))
pred_sd = tf.metrics.mean(values=diffs)
# Average squared error
mean_loss = tf.metrics.mean(values=per_example_loss)
# Average absolute error
squared_diff = tf.pow(pred - ratings, 2)
per_example_err = tf.sqrt(squared_diff)
mean_err = tf.metrics.mean(per_example_err)
# Pearson correlation
correlation = metrics.streaming_pearson_correlation(pred, ratings)
# Kendall Tau
kendalltau = kendall_tau_metric(pred, ratings)
output = {
"eval_loss": mean_loss,
"eval_mean_err": mean_err,
"eval_mean_pred": mean_pred,
"eval_pred_sd": pred_sd,
"correlation": correlation,
"kendalltau": kendalltau,
}
return output
def input_fn_builder(tfrecord_file,
seq_length,
is_training,
batch_size,
drop_remainder=True):
"""Creates an `input_fn` closure to be passed to Estimator."""
logging.info(
"Creating input fun with batch_size: {} and drop remainder: {}".format(
str(batch_size), str(drop_remainder)))
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"score": tf.FixedLenFeature([], tf.float32)
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params): # pylint: disable=unused-argument
"""Acutal data generator."""
tfrecord_file_expanded = tf.io.gfile.glob(tfrecord_file)
n_files = len(tfrecord_file_expanded)
if n_files > 1:
logging.info("Found {} files matching {}".format(
str(n_files), tfrecord_file))
d = tf.data.TFRecordDataset(tfrecord_file_expanded)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
d = d.map(lambda record: _decode_record(record, name_to_features))
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def _model_comparator(best_eval_result, current_eval_result):
metric = FLAGS.export_metric
return best_eval_result[metric] <= current_eval_result[metric]
def _serving_input_fn_builder(seq_length):
"""Input function for exported models."""
# We had to use `tf.zeros` instead of the usual
# `tf.placeholder(tf.int64, shape=[None, seq_length])` to be compatible with
# TF2's eager mode, which deprecates all calls to `tf.placeholder`.
if tf.executing_eagerly():
name_to_features = {
"input_ids": tf.zeros(dtype=tf.int64, shape=[0, seq_length]),
"input_mask": tf.zeros(dtype=tf.int64, shape=[0, seq_length]),
"segment_ids": tf.zeros(dtype=tf.int64, shape=[0, seq_length])
}
elif not tf.executing_eagerly() and not FLAGS.dynamic_seq_length:
name_to_features = {
"input_ids": tf.placeholder(tf.int64, shape=[None, seq_length]),
"input_mask": tf.placeholder(tf.int64, shape=[None, seq_length]),
"segment_ids": tf.placeholder(tf.int64, shape=[None, seq_length])
}
elif FLAGS.dynamic_seq_length:
assert not tf.executing_eagerly(), \
"Training with `dynamic_seq_length` is not supported in Eager mode."
logging.info("Exporting a model with dynamic sequence length.")
name_to_features = {
"input_ids": tf.placeholder(tf.int64, shape=[None, None]),
"input_mask": tf.placeholder(tf.int64, shape=[None, None]),
"segment_ids": tf.placeholder(tf.int64, shape=[None, None])
}
return tf.estimator.export.build_raw_serving_input_receiver_fn(
name_to_features)
def run_finetuning(train_tfrecord,
dev_tfrecord,
train_eval_fun=None,
use_tpu=False,
additional_train_params=None):
"""Main function to train and eval BLEURT."""
logging.info("Initializing BLEURT training pipeline.")
bleurt_params = checkpoint_lib.get_bleurt_params_from_flags_or_ckpt()
max_seq_length = bleurt_params["max_seq_length"]
bert_config_file = bleurt_params["bert_config_file"]
init_checkpoint = bleurt_params["init_checkpoint"]
logging.info("Creating input data pipeline.")
logging.info("Train/Eval batch size: {}".format(str(FLAGS.batch_size)))
train_input_fn = input_fn_builder(
train_tfrecord,
seq_length=max_seq_length,
is_training=True,
batch_size=FLAGS.batch_size,
drop_remainder=use_tpu)
dev_input_fn = input_fn_builder(
dev_tfrecord,
seq_length=max_seq_length,
is_training=False,
batch_size=FLAGS.batch_size,
drop_remainder=use_tpu)
logging.info("Creating model.")
bert_config = modeling.BertConfig.from_json_file(bert_config_file)
num_train_steps = FLAGS.num_train_steps
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=use_tpu,
use_one_hot_embeddings=use_tpu,
n_hidden_layers=FLAGS.n_hidden_layers,
hidden_layers_width=FLAGS.hidden_layers_width,
dropout_rate=FLAGS.dropout_rate)
logging.info("Creating TF Estimator.")
exporters = [
tf.estimator.BestExporter(
"bleurt_best",
serving_input_receiver_fn=_serving_input_fn_builder(max_seq_length),
event_file_pattern="eval_default/*.tfevents.*",
compare_fn=_model_comparator,
exports_to_keep=1)
]
tf.enable_resource_variables()
logging.info("*** Entering the Training / Eval phase ***")
if not additional_train_params:
additional_train_params = {}
train_eval_fun(
model_fn=model_fn,
train_input_fn=train_input_fn,
eval_input_fn=dev_input_fn,
exporters=exporters,
**additional_train_params)
|
the-stack_106_29391 | """
Once a model is learned, use this to play it.
"""
import carmunk
import numpy as np
from nn import neural_net
NUM_SENSORS = 3
def play(model):
car_distance = 0
game_state = carmunk.GameState()
# Do nothing to get initial.
    _, state = game_state.frame_step(2)
# Move.
while True:
car_distance += 1
# Choose action.
action = (np.argmax(model.predict(state, batch_size=1)))
# Take action.
_, state = game_state.frame_step(action)
# Tell us something.
if car_distance % 1000 == 0:
print("Current distance: %d frames." % car_distance)
if __name__ == "__main__":
saved_model = 'saved-models/128-128-64-50000-50000.h5'
model = neural_net(NUM_SENSORS, [128, 128], saved_model)
play(model)
|
the-stack_106_29393 | TEST_OCF_ACCOUNTS = (
'sanjay', # an old, sorried account with kerberos princ
'alec', # an old, sorried account with no kerberos princ
'guser', # an account specifically made for testing
'nonexist', # this account does not exist
)
TESTER_CALNET_UIDS = (
872544, # daradib
1034192, # ckuehl
869331, # tzhu
1031366, # mattmcal
1101587, # jvperrin
)
# comma separated tuples of CalLink OIDs and student group names
TEST_GROUP_ACCOUNTS = (
(91740, 'The Testing Group'), # needs to have a real OID, so boo
(46187, 'Open Computing Facility'), # good old ocf
(46692, 'Awesome Group of Awesome'), # boo another real OID
)
|
the-stack_106_29394 | """Sonarr Models."""
# pylint: disable=invalid-name, too-many-instance-attributes
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from ..const import DATE, EPISODE_ID, PATH, SERIES_ID
from .base import BaseModel
from .request_common import (
_Common2,
_Common3,
_Common4,
_Common7,
_Common8,
_Fields,
_ImportListCommon,
_ManualImport,
_Monitor,
_MonitorOption,
_Notification,
_Quality,
_QualityCommon,
_RecordCommon,
_ReleaseCommon,
_Rename,
_StatusMessage,
_TagDetails,
)
from .sonarr_common import (
_SonarrAddOptions,
_SonarrCommon,
_SonarrCommon2,
_SonarrEpisodeFile,
_SonarrEpisodeHistoryData,
_SonarrEpisodeMonitor,
_SonarrLanguageItem,
_SonarrParseEpisodeInfo,
_SonarrSeries2,
_SonarrSeriesAlternateTitle,
_SonarrSeriesCommon,
_SonarrWantedMissingRecord,
)
class SonarrCommands(str, Enum):
"""Sonarr commands."""
DOWNLOADED_EPISODES_SCAN = "DownloadedEpisodesScan"
EPISODE_SEARCH = "EpisodeSearch"
REFRESH_SERIES = "RefreshSeries"
RENAME_SERIES = "RenameSeries"
RESCAN_SERIES = "RescanSeries"
SEASON_SEARCH = "SeasonSearch"
SERIES_SEARCH = "SeriesSearch"
class SonarrEventType(Enum):
"""Sonarr event types."""
DELETED = 5
FAILED = 4
GRABBED = 1
IGNORED = 7
IMPORTED = 3
RENAMED = 6
class SonarrSortKeys(str, Enum):
"""Sonarr sort keys."""
AIR_DATE_UTC = "episode.airDateUtc"
DATE = DATE
DOWNLOAD_CLIENT = "downloadClient"
EPISODE = "episode"
EPISODE_ID = EPISODE_ID
EPISODE_TITLE = "episode.title"
ID = "id"
INDEXER = "indexer"
LANGUAGE = "language"
MESSAGE = "message"
PATH = PATH
PROGRESS = "progress"
PROTOCOL = "protocol"
QUALITY = "quality"
RATINGS = "ratings"
SERIES_ID = SERIES_ID
SERIES_TITLE = "series.sortTitle"
SIZE = "size"
SOURCE_TITLE = "sourcetitle"
STATUS = "status"
TIMELEFT = "timeleft"
@dataclass(init=False)
class SonarrCalendar(_SonarrWantedMissingRecord, _SonarrCommon2):
"""Sonarr calendar attributes."""
series: _SonarrSeries2 | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.series = _SonarrSeries2(self.series) or {}
@dataclass(init=False)
class SonarrEpisode(_SonarrCommon):
"""Sonarr episode attributes."""
series: _SonarrSeriesCommon | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.series = _SonarrSeriesCommon(self.series) or {}
@dataclass(init=False)
class SonarrEpisodeFile(_SonarrEpisodeFile):
"""Sonarr episode file attributes."""
@dataclass(init=False)
class SonarrEpisodeHistory(_Common2, _QualityCommon):
"""Sonarr history record attributes."""
data: _SonarrEpisodeHistoryData | None = None
date: datetime | None = None
episodeId: int | None = None
id: int | None = None
language: _Common3 | None = None
languageCutoffNotMet: bool | None = None
seriesId: int | None = None
sourceTitle: str | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.data = _SonarrEpisodeHistoryData(self.data) or {}
self.language = _Common3(self.language) or {}
@dataclass(init=False)
class SonarrHistory(_RecordCommon):
"""Sonarr history attributes."""
records: list[SonarrEpisodeHistory] | None = None
def __post_init__(self):
"""Post init."""
self.records = [SonarrEpisodeHistory(record) for record in self.records or []]
@dataclass(init=False)
class SonarrWantedMissing(_RecordCommon):
"""Sonarr wanted missing attributes."""
records: list[_SonarrWantedMissingRecord] | None = None
def __post_init__(self):
"""Post init."""
self.records = [
_SonarrWantedMissingRecord(record) for record in self.records or []
]
@dataclass(init=False)
class SonarrQueue(_RecordCommon):
"""Sonarr queue attributes."""
records: list[SonarrQueueDetail] | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.records = [SonarrQueueDetail(record) for record in self.records or []]
@dataclass(init=False)
class SonarrParse(BaseModel):
"""Sonarr parse attributes."""
episodes: list[SonarrEpisode] | None = None
parsedEpisodeInfo: _SonarrParseEpisodeInfo | None = None
series: _SonarrSeries2 | None = None
title: str | None = None
def __post_init__(self):
"""Post init."""
self.episodes = [SonarrEpisode(episode) for episode in self.episodes or []]
self.parsedEpisodeInfo = _SonarrParseEpisodeInfo(self.parsedEpisodeInfo) or {}
self.series = _SonarrSeries2(self.series) or {}
@dataclass(init=False)
class _SonarrSceneMapping(BaseModel):
"""Sonarr scene mapping attributes."""
title: str | None = None
seasonNumber: int | None = None
@dataclass(init=False)
class SonarrRelease(_ReleaseCommon):
"""Sonarr release attributes."""
absoluteEpisodeNumbers: list[int] | None = None
episodeNumbers: list[int] | None = None
episodeRequested: bool | None = None
fullSeason: bool | None = None
isAbsoluteNumbering: bool | None = None
isDaily: bool | None = None
isPossibleSpecialEpisode: bool | None = None
language: _Common3 | None = None
languageWeight: int | None = None
mappedAbsoluteEpisodeNumbers: list[int] | None = None
mappedEpisodeNumbers: list[int] | None = None
mappedSeasonNumber: int | None = None
preferredWordScore: int | None = None
quality: _Quality | None = None
rejected: bool | None = None
releaseGroup: str | None = None
releaseHash: str | None = None
releaseWeight: int | None = None
sceneMapping: _SonarrSceneMapping | None = None
seasonNumber: int | None = None
seriesTitle: str | None = None
special: bool | None = None
tvdbId: int | None = None
tvRageId: int | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.language = _Common3(self.language) or {}
self.quality = _Quality(self.quality) or {}
self.sceneMapping = _SonarrSceneMapping(self.sceneMapping) or {}
@dataclass(init=False)
class SonarrSeries(_SonarrSeriesCommon):
"""Sonarr series attributes."""
alternateTitles: list[_SonarrSeriesAlternateTitle] | None = None
rootFolderPath: str | None = None
previousAiring: str | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.alternateTitles = [
_SonarrSeriesAlternateTitle(altTit) for altTit in self.alternateTitles or []
]
@dataclass(init=False)
class SonarrSeriesAdd(SonarrSeries):
"""Sonarr series add attributes."""
addOptions: _SonarrAddOptions | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.addOptions = _SonarrAddOptions(self.addOptions) or {}
@dataclass(init=False)
class SonarrSeriesLookup(_SonarrSeriesCommon):
"""Sonarr series lookup attributes."""
@dataclass(init=False)
class SonarrBlocklistSeries(_Common7):
"""Sonarr blocklist series attributes."""
date: datetime | None = None
episodeIds: list[int] | None = None
language: _Common3 | None = None
message: str | None = None
quality: _Quality | None = None
seriesId: int | None = None
sourceTitle: str | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.language = _Common3(self.language) or {}
self.quality = _Quality(self.quality) or {}
@dataclass(init=False)
class SonarrBlocklist(_RecordCommon):
"""Sonarr blocklist attributes."""
records: list[SonarrBlocklistSeries] | None = None
def __post_init__(self):
"""Post init."""
self.records = [SonarrBlocklistSeries(record) for record in self.records or []]
@dataclass(init=False)
class SonarrNamingConfig(BaseModel):
"""Sonarr naming config attributes."""
animeEpisodeFormat: str | None = None
dailyEpisodeFormat: str | None = None
id: int | None = None
includeEpisodeTitle: bool | None = None
includeQuality: bool | None = None
includeSeriesTitle: bool | None = None
multiEpisodeStyle: int | None = None
numberStyle: str | None = None
renameEpisodes: bool | None = None
replaceIllegalCharacters: bool | None = None
replaceSpaces: bool | None = None
seasonFolderFormat: str | None = None
separator: str | None = None
seriesFolderFormat: str | None = None
specialsFolderFormat: str | None = None
standardEpisodeFormat: str | None = None
@dataclass(init=False)
class SonarrNotification(_Common3, _Notification):
"""Sonarr notification attributes."""
fields: list[_Fields] | None = None
onEpisodeFileDelete: bool | None = None
onEpisodeFileDeleteForUpgrade: bool | None = None
onSeriesDelete: bool | None = None
supportsOnEpisodeFileDelete: bool | None = None
supportsOnEpisodeFileDeleteForUpgrade: bool | None = None
supportsOnSeriesDelete: bool | None = None
def __post_init__(self):
"""Post init."""
self.fields = [_Fields(field) for field in self.fields or []]
@dataclass(init=False)
class SonarrQueueDetail(_Common4, _Common8):
"""Sonarr queue detail attributes."""
episode: _SonarrCommon | None = None
episodeId: int | None = None
language: _Common3 | None = None
series: _SonarrSeries2 | None = None
seriesId: int | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.quality = _Quality(self.quality) or {}
self.episode = _SonarrCommon(self.episode) or {}
self.language = _Common3(self.language) or {}
self.series = _SonarrSeries2(self.series) or {}
self.statusMessages = [_StatusMessage(x) for x in self.statusMessages or []]
@dataclass(init=False)
class SonarrRename(_Rename):
"""Sonarr rename attributes."""
episodeFileId: int | None = None
episodeNumbers: list[int] | None = None
seasonNumber: int | None = None
seriesId: int | None = None
@dataclass(init=False)
class SonarrTagDetails(_TagDetails):
"""Sonarr tag details attributes."""
indexerIds: list[int] | None = None
seriesIds: list[int] | None = None
@dataclass(init=False)
class SonarrImportList(_ImportListCommon, _Common3):
"""Sonarr importlist attributes."""
enableAutomaticAdd: bool | None = None
fields: list[_Fields] | None = None
implementation: str | None = None
implementationName: str | None = None
infoLink: str | None = None
languageProfileId: int | None = None
listType: str | None = None
qualityProfileId: int | None = None
seasonFolder: bool | None = None
seriesType: str | None = None
shouldMonitor: str | None = None
tags: list[int | None] | None = None
def __post_init__(self):
"""Post init."""
self.fields = [_Fields(field) for field in self.fields or []]
@dataclass(init=False)
class SonarrManualImport(_ManualImport):
"""Sonarr manual import attributes."""
episodes: list[SonarrEpisodeMonitor] | None = None
folderName: str | None = None
language: _Common3 | None = None
relativePath: str | None = None
seasonNumber: int | None = None
series: _SonarrSeries2 | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.episodes = [
SonarrEpisodeMonitor(episode) for episode in self.episodes or []
]
self.language = _Common3(self.language) or {}
self.series = _SonarrSeries2(self.series) or {}
@dataclass(init=False)
class SonarrSeasonPass(BaseModel):
"""Sonarr season pass attributes."""
monitoringOptions: _MonitorOption | None = None
series: list[_Monitor] | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.monitoringOptions = _MonitorOption(self.monitoringOptions) or {}
self.series = [_Monitor(x) for x in self.series or []]
@dataclass(init=False)
class SonarrLanguage(BaseModel):
"""Sonarr launguage attributes."""
cutoff: _Common3 | None = None
id: int | None = None
languages: list[_SonarrLanguageItem] | None = None
name: str | None = None
upgradeAllowed: bool | None = None
def __post_init__(self):
"""Post init."""
super().__post_init__()
self.cutoff = _Common3(self.cutoff) or {}
self.languages = [_SonarrLanguageItem(x) for x in self.languages or []]
@dataclass(init=False)
class SonarrEpisodeMonitor(_SonarrEpisodeMonitor):
"""Sonarr episode monitor attributes."""
|
the-stack_106_29396 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 2020
Evgeniya Zhukova
ID: 101239316
"""
#classes
class Toy:
#constructor
def __init__(self, id=0, name=None, age=0,
price=0, category=None, discount_price=0):
self.id = id
self.name = name
self.age = age
self.price = price
self.category = category
        self.discount_price = discount_price
class Category:
#constructor
def __init__(self, id=0, name=None):
self.id = id
self.name = name
class Cart:
#constructor
    def __init__(self, cart_list=None, sum=0, sum_discount=0, sum_free=0, free_toys=0):
        # Default to None to avoid sharing one mutable list between Cart instances
        self.cart_list = cart_list if cart_list is not None else []
self.sum = sum
self.sum_discount = sum_discount
self.sum_free = sum_free
self.free_toys = free_toys
#all cart sums assign zero
def zero_sum(self):
self.sum = 0
self.sum_discount = 0
self.sum_free = 0
self.free_toys = 0 |
the-stack_106_29397 | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright (C) 2020 Inspur Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: edit_priv_user
version_added: "0.1.0"
author:
- WangBaoshan (@ISIB-group)
short_description: Change user privilege.
description:
- Change user privilege on Inspur server.
options:
uname:
description:
- User name.
type: str
required: true
role_id:
description:
- user group, default user group,'Administrator', 'Operator', 'Commonuser','OEM','NoAccess',
- use command C(user_group_info) can get all group information.
type: str
required: true
priv:
description:
- User access, select one or more from None/KVM/VMM/SOL.
choices: ['kvm', 'vmm', 'sol', 'none']
type: list
elements: str
required: true
extends_documentation_fragment:
- inspur.sm.ism
'''
EXAMPLES = '''
- name: Edit user privilege test
hosts: ism
connection: local
gather_facts: no
vars:
ism:
host: "{{ ansible_ssh_host }}"
username: "{{ username }}"
password: "{{ password }}"
tasks:
- name: "Change user privilege"
inspur.sm.edit_priv_user:
uname: "wbs"
role_id: "Administrator"
priv: "kvm,sol"
provider: "{{ ism }}"
'''
RETURN = '''
message:
description: Messages returned after module execution.
returned: always
type: str
state:
description: Status after module execution.
returned: always
type: str
changed:
description: Check to see if a change was made on the device.
returned: always
type: bool
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.inspur.sm.plugins.module_utils.ism import (ism_argument_spec, get_connection)
class User(object):
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
self.results = dict()
def init_module(self):
"""Init module object"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=False)
def run_command(self):
self.module.params['subcommand'] = 'setpriv'
self.results = get_connection(self.module)
def show_result(self):
"""Show result"""
self.module.exit_json(**self.results)
def work(self):
"""Worker"""
self.run_command()
self.show_result()
def main():
argument_spec = dict(
uname=dict(type='str', required=True),
role_id=dict(type='str', required=True),
priv=dict(type='list', elements='str', required=True, choices=['kvm', 'vmm', 'sol', 'none']),
)
argument_spec.update(ism_argument_spec)
user_obj = User(argument_spec)
user_obj.work()
if __name__ == '__main__':
main()
|
the-stack_106_29399 |
from matplotlib import lines, pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
data = np.load('../../data/Orbits.npy')
Positions = data[:,:,(0,1)]
figure, ax = plt.subplots()
# Setting limits for x and y axis
r = 2
ax.set_xlim(-r, r)
ax.set_ylim(-r, r)
ax2 = ax.twinx()
ax2.set_xlim(-r, r)
ax2.set_ylim(-r, r)
ax3 = ax.twinx()
ax3.set_xlim(-r, r)
ax3.set_ylim(-r, r)
ax4 = ax.twinx()
ax4.set_xlim(-r, r)
ax4.set_ylim(-r, r)
ax5 = ax.twinx()
ax5.set_xlim(-r, r)
ax5.set_ylim(-r, r)
# Plotting graph for bodies
S, = ax.plot(0, 0, 'y.-', label='Sun')
M, = ax2.plot(0, 0, 'm.-', label='Mercury')
V, = ax3.plot(0, 0, 'c.-', label='Venus')
E, = ax4.plot(0, 0, 'b.-', label='Earth')
A, = ax5.plot(0, 0, 'r.-', label='Mars')
ax.legend([S, M, V, E, A], [S.get_label(), M.get_label(), V.get_label(), E.get_label(), A.get_label()], loc=0)
def animation_function(i):
S.set_data(Positions[:,0,0][i], Positions[:,0,1][i])
M.set_data(Positions[:,1,0][i], Positions[:,1,1][i])
V.set_data(Positions[:,2,0][i], Positions[:,2,1][i])
E.set_data(Positions[:,3,0][i], Positions[:,3,1][i])
A.set_data(Positions[:,4,0][i], Positions[:,4,1][i])
return
animation = FuncAnimation(figure, func=animation_function, interval=10)
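# Optionally persist the animation to disk (sketch; writing mp4 requires an
# ffmpeg installation that matplotlib can find):
#   animation.save('orbits.mp4', fps=30)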
plt.show()
plt.close() |
the-stack_106_29403 | from .normalize import _normalize
class _Edition:
@staticmethod
def _edition(body):
return body.find("div", type="edition") # Note: limit to xml:space="preserve"?
@staticmethod
def language(body):
edition = _Edition._edition(body)
if edition:
return _normalize(edition.attrs.get("xml:lang"))
@staticmethod
def foreign_languages(body):
edition = _Edition._edition(body)
if not edition:
return {}
result = {}
for elem in edition.find_all("foreign"):
lang = _normalize(elem.attrs.get("xml:lang"))
if not lang:
continue
result[lang] = result.get(lang, 0) + 1
return result
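# Usage sketch (assumes `body` is a BeautifulSoup element for a TEI/EpiDoc
# <body>, i.e. the same kind of object the methods above expect):
#
#   lang = _Edition.language(body)             # e.g. "grc", or None
#   counts = _Edition.foreign_languages(body)  # e.g. {"la": 2}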
|
the-stack_106_29404 | """
A collection of classes used to manipulate Ordnance Survey GB GML data,
used with prepgml4ogr.py.
"""
import os
import re
import json
import lxml
from lxml import etree
from lxml import objectify
class prep_osgml():
"""
Base class that provides the main interface methods `prepare_feature` and
`get_feat_types` and performs basic manipulation such as exposing the fid,
    adding an element containing the filename of the source and adding an
element with the orientation in degrees.
"""
def __init__(self, inputfile):
self.inputfile = inputfile
self.feat_types = []
def get_feat_types(self):
return self.feat_types
def prepare_feature(self, feat_str):
# Parse the xml string into something useful
feat_elm = etree.fromstring(feat_str)
feat_elm = self._prepare_feat_elm(feat_elm)
return etree.tostring(feat_elm,
encoding='UTF-8',
pretty_print=True).decode('utf_8')
def _prepare_feat_elm(self, feat_elm):
feat_elm = self._set_srs(feat_elm)
feat_elm = self._add_fid_elm(feat_elm)
feat_elm = self._add_filename_elm(feat_elm)
feat_elm = self._add_orientation_degree_elms(feat_elm)
return feat_elm
def _set_srs(self, feat_elm):
srs_elms = feat_elm.xpath('//*[@srsName]')
for elm in srs_elms:
elm.attrib['srsName'] = 'EPSG:27700'
return feat_elm
def _add_fid_elm(self, feat_elm):
# Create an element with the fid
elm = etree.SubElement(feat_elm, "fid")
elm.text = feat_elm.get('fid')
return feat_elm
def _add_filename_elm(self, feat_elm):
# Create an element with the filename
elm = etree.SubElement(feat_elm, "filename")
elm.text = os.path.basename(self.inputfile)
return feat_elm
def _add_orientation_degree_elms(self, feat_elm):
# Correct any orientation values to be a
# tenth of their original value
orientation_elms = feat_elm.xpath('//orientation')
for elm in orientation_elms:
# Add a new orientDeg element as a child to the
# the orientation elm to be orientation/10
# (this applies integer division which is fine in
# this instance as we are not concerned with the decimals)
degree_elm = etree.SubElement(elm.getparent(), "orientDeg")
degree_elm.text = str(int(elm.text) / 10)
return feat_elm
class prep_vml(prep_osgml):
"""
    Preparation class for OS VectorMap Local features.
"""
def __init__(self, inputfile):
prep_osgml.__init__(self, inputfile)
self.feat_types = [
'Text',
'VectorMapPoint',
'Line',
'RoadCLine',
'Area',
'creationDate'
]
def _prepare_feat_elm(self, feat_elm):
# We need to record the creation date so that we can include it as an
# attribute on all features, when we are passed the creationDate
# element simply record it's text value and return it as is. This is
# potentially brittle as it assumes that the creationDate element
# appears before the features in the source GML.
if feat_elm.tag == 'creationDate':
self.creation_date = feat_elm.text
return feat_elm
else:
feat_elm = prep_osgml._prepare_feat_elm(self, feat_elm)
feat_elm = self._add_tile_elm(feat_elm)
feat_elm = self._add_creation_date_elm(feat_elm)
return feat_elm
def _add_tile_elm(self, feat_elm):
elm = etree.SubElement(feat_elm, "tile")
elm.text = os.path.splitext(os.path.basename(self.inputfile))[0]
return feat_elm
def _add_creation_date_elm(self, feat_elm):
elm = etree.SubElement(feat_elm, "creationDate")
elm.text = self.creation_date
return feat_elm
class prep_vmd(prep_osgml):
def __init__(self, inputfile):
prep_osgml.__init__(self, inputfile)
self.feat_types = [
'AdministrativeBoundary',
'Airport',
'Building',
'ElectricityTransmissionLine',
'Foreshore',
'FunctionalSite',
'Glasshouse',
'HeritageSite',
'Land',
'NamedPlace',
'Woodland',
'Ornament',
'PublicAmenity',
'RailwayStation',
'RailwayTrack',
'RailwayTunnel',
'Road',
'MotorwayJunction',
'RoadTunnel',
'Roundabout',
'SpotHeight',
'SurfaceWater_Area',
'SurfaceWater_Line',
'TidalBoundary',
'TidalWater'
]
def _add_fid_elm(self, feat_elm):
# Create an element with the fid
elm = etree.SubElement(feat_elm, "fid")
elm.text = feat_elm.get('id')
return feat_elm
class prep_osmm_topo(prep_osgml):
"""
    Preparation class for OS MasterMap features which in addition to the work
performed by `prep_osgml` adds `themes`, `descriptiveGroups` and
`descriptiveTerms` elements containing a delimited string of the attributes
that can appear multiple times.
"""
def __init__(self, inputfile):
prep_osgml.__init__(self, inputfile)
self.feat_types = [
'BoundaryLine',
'CartographicSymbol',
'CartographicText',
'TopographicArea',
'TopographicLine',
'TopographicPoint'
]
self.list_seperator = ', '
def _prepare_feat_elm(self, feat_elm):
feat_elm = prep_osgml._prepare_feat_elm(self, feat_elm)
feat_elm = self._add_lists_elms(feat_elm)
return feat_elm
def _add_lists_elms(self, feat_elm):
feat_elm = self._create_list_of_terms(feat_elm, 'theme')
feat_elm = self._create_list_of_terms(feat_elm, 'descriptiveGroup')
feat_elm = self._create_list_of_terms(feat_elm, 'descriptiveTerm')
return feat_elm
def _create_list_of_terms(self, feat_elm, name):
text_list = feat_elm.xpath('//%s/text()' % name)
if len(text_list):
elm = etree.SubElement(feat_elm, "%ss" % name)
elm.text = self.list_seperator.join(text_list)
return feat_elm
class prep_osmm_topo_qgis(prep_osmm_topo):
"""
    Preparation class for OS MasterMap features which in addition to the work performed by
    `prep_osmm_topo` adds QGIS specific label attributes such as `qFont` and `qAnchorPos`.
"""
def __init__(self, filename):
prep_osmm_topo.__init__(self, filename)
# AC - define the font
        if os.name == 'posix':
# Will probably need different font names
self.fonts = ('Garamond', 'Arial', 'Roman', 'ScriptC')
        elif os.name == 'nt':
# Ordnance Survey use
# 'Lutheran', 'Normal', 'Light Roman', 'Suppressed text'
self.fonts = ('GothicE', 'Monospac821 BT', 'Consolas', 'ScriptC', 'Arial Narrow')
        elif os.name == 'mac':
# Will probably need different font name
self.fonts = ('Garamond', 'Arial', 'Roman', 'ScriptC')
# AC - the possible text placement positions used by QGIS
self.anchorPosition = ('Bottom Left', 'Left', 'Top Left', 'Bottom',
'Over', 'Top', 'Bottom Right', 'Right', 'Top Right')
def _prepare_feat_elm(self, feat_elm):
feat_elm = prep_osmm_topo._prepare_feat_elm(self, feat_elm)
feat_elm = self._add_qgis_elms(feat_elm)
return feat_elm
def _add_qgis_elms(self, feat_elm):
if feat_elm.tag == 'CartographicText':
text_render_elm = feat_elm.xpath('//textRendering')[0]
anchor_pos = int(text_render_elm.xpath('./anchorPosition/text()')[0])
try:
anchor_pos = self.anchorPosition[anchor_pos]
except:
anchor_pos = 4
elm = etree.SubElement(text_render_elm, 'qAnchorPos')
elm.text = anchor_pos
font = int(text_render_elm.xpath('./font/text()')[0])
try:
font = self.fonts[font]
except:
font = 'unknown font (%s)' % str(font)
elm = etree.SubElement(text_render_elm, 'qFont')
elm.text = font
return feat_elm
class prep_osmm_itn(prep_osgml):
"""
Preparation class for OS MasterMap ITN features.
"""
def __init__(self, filename):
prep_osgml.__init__(self, filename)
self.feat_types = [
'Road',
'RoadLink',
'RoadNode',
'FerryLink',
'FerryNode',
'FerryTerminal',
'InformationPoint',
'RoadNodeInformation',
'RoadLinkInformation',
'RoadRouteInformation'
]
def _prepare_feat_elm(self, feat_elm):
feat_elm = prep_osgml._prepare_feat_elm(self, feat_elm)
feat_elm = self._expose_attributes(feat_elm)
feat_elm = self._add_datetime_summary(feat_elm)
feat_elm = self._add_datetime_json(feat_elm)
return feat_elm
def _expose_attributes(self, feat_elm):
elm_list = feat_elm.xpath("""//networkMember |
//directedLink |
//directedNode |
//referenceToRoadLink |
//referenceToRoadNode |
//referenceToTopographicArea |
//referenceToNetwork |
//vehicleQualifier/type |
//vehicleQualifier/use""")
# Default attribute values for optional attributes
defaults = {
'directedNode': {'gradeSeparation': '0'},
'referenceToRoadNode': {'gradeSeparation': '0'}
}
for elm in elm_list:
# Assign default values to optional attributes
if elm.tag in defaults.keys():
for key, val in defaults[elm.tag].items():
if key not in elm.attrib:
elm.attrib[key] = val
for name in elm.attrib:
value = elm.get(name)
name = '%s_%s' % (elm.tag, name)
sub_elm = etree.SubElement(elm if not elm.text else elm.getparent(), name)
sub_elm.text = value
return feat_elm
def _add_datetime_summary(self, feat_elm):
def elm_str(elm):
return elm.tag + ((': ' + elm.text) if elm.text else '')
for elm in feat_elm.xpath('//dateTimeQualifier'):
# Create a basic summary by listing tag names and values
value = ', '.join(map(elm_str, elm.xpath(".//*")))
sub_elm = etree.SubElement(feat_elm, 'dateTimeQualifier_summary')
sub_elm.text = value
return feat_elm
def _add_datetime_json(self, feat_elm):
""" Add a JSON representation of dateTimeQualifier elements """
elms = feat_elm.xpath('//dateTimeQualifier')
if elms:
objs = [objectify.fromstring(etree.tostring(elm)) for elm in elms]
sub_elm = etree.SubElement(feat_elm, 'dateTimeQualifier_json')
sub_elm.text = ObjectifyJSONEncoder().encode(objs)
return feat_elm
class prep_addressbase():
"""
    Simple preparation of AddressBase data
"""
def __init__(self, inputfile):
self.inputfile = inputfile
self.feat_types = ['Address']
def get_feat_types(self):
return self.feat_types
def prepare_feature(self, feat_str):
# Parse the xml string into something useful
feat_elm = etree.fromstring(feat_str)
feat_elm = self._prepare_feat_elm(feat_elm)
return etree.tostring(feat_elm,
encoding='UTF-8',
pretty_print=True).decode('utf_8')
def _prepare_feat_elm(self, feat_elm):
feat_elm = self._drop_gmlid(feat_elm)
return feat_elm
def _drop_gmlid(self, feat_elm):
feat_elm.attrib.pop('id')
return feat_elm
class prep_addressbase_premium(prep_addressbase):
"""
    Preparation of AddressBase Premium data
"""
def __init__(self, inputfile):
prep_addressbase.__init__(self, inputfile)
self.feat_types = ['BasicLandPropertyUnit', 'Street']
def prepare_feature(self, feat_str):
# Parse the xml string into something useful
feat_elm = etree.fromstring(feat_str)
# Manipulate the feature
feat_elm = self._prepare_feat_elm(feat_elm)
# In this instance we are not returning a string representing a single
# element as we are unnesting features in the AddressBase Premium GML.
# We end up returning a string of several elements which are wrapped in
# the output document with either a streetMember or
        # basicLandPropertyUnitMember element which results in valid XML
elms = [etree.tostring(feat_elm,
encoding='UTF-8',
pretty_print=True).decode('utf_8')]
for elm in self.member_elms:
elms.append(
etree.tostring(elm, encoding='UTF-8',
pretty_print=True).decode('utf_8'))
return ''.join(elms)
def _prepare_feat_elm(self, feat_elm):
feat_elm = prep_addressbase._prepare_feat_elm(self, feat_elm)
feat_elm = self._to_multipoint(feat_elm)
self.member_elms = self._extract_child_members(feat_elm)
return feat_elm
def _to_multipoint(self, feat_elm):
""" Move Street streetStart and streetEnd Point elements into a
MultiPoint """
if feat_elm.tag == 'Street':
multi_elm = etree.SubElement(etree.SubElement(feat_elm, 'geom'),
'MultiPoint')
point_elms = feat_elm.xpath('//Point')
for point_elm in point_elms:
etree.SubElement(multi_elm, 'pointMember').append(point_elm)
return feat_elm
def _extract_child_members(self, feat_elm):
""" Unnest BLPU and Street feature types adding a reference to uprn or
usrn as appropriate """
if feat_elm.tag == 'BasicLandPropertyUnit':
uprn = feat_elm.findtext('uprn')
child_elms = feat_elm.xpath("""//Classification |
//LandPropertyIdentifier |
//ApplicationCrossReference |
//DeliveryPointAddress |
//Organisation""")
for elm in child_elms:
elm.getparent().remove(elm)
elm = self._add_lang_elm(elm)
sub_elm = etree.SubElement(elm, 'uprn')
sub_elm.text = uprn
if feat_elm.tag == 'Street':
usrn = feat_elm.findtext('usrn')
child_elms = feat_elm.xpath("//StreetDescriptiveIdentifier")
for elm in child_elms:
elm.getparent().remove(elm)
elm = self._add_lang_elm(elm)
sub_elm = etree.SubElement(elm, 'usrn')
sub_elm.text = usrn
return child_elms
def _add_lang_elm(self, feat_elm):
if feat_elm.tag in ['StreetDescriptiveIdentifier', 'LandPropertyIdentifier']:
elm = etree.SubElement(feat_elm, "lang")
try:
lang = feat_elm.xpath('.//@lang')[0]
except IndexError:
lang = 'en'
elm.text = lang
return feat_elm
class prep_osmm_water():
"""
    Preparation of OSMM Water Layer features
"""
def __init__(self, inputfile):
self.inputfile = inputfile
self.feat_types = ['WatercourseLink', 'HydroNode']
def prepare_feature(self, feat_str):
# Parse the xml string into something useful
feat_elm = etree.fromstring(feat_str)
feat_elm = self._prepare_feat_elm(feat_elm)
return etree.tostring(feat_elm,
encoding='UTF-8',
pretty_print=True).decode('utf_8')
def _prepare_feat_elm(self, feat_elm):
feat_elm = self._add_fid_elm(feat_elm)
feat_elm = self._add_filename_elm(feat_elm)
feat_elm = self._add_start_end_node_elm(feat_elm)
feat_elm = self._add_code_list_values(feat_elm)
return feat_elm
def _add_fid_elm(self, feat_elm):
# Create an element with the fid
elm = etree.SubElement(feat_elm, "fid")
elm.text = feat_elm.get('id')
return feat_elm
def _add_filename_elm(self, feat_elm):
# Create an element with the filename
elm = etree.SubElement(feat_elm, "filename")
elm.text = os.path.basename(self.inputfile)
return feat_elm
def _add_start_end_node_elm(self, feat_elm):
start_elm = feat_elm.xpath('//startNode')
if len(start_elm):
etree.SubElement(feat_elm,
'startNode').text = start_elm[0].get('href')[1:]
end_elm = feat_elm.xpath('//endNode')
if len(end_elm):
etree.SubElement(feat_elm,
'endNode').text = end_elm[0].get('href')[1:]
return feat_elm
def _add_code_list_values(self, feat_elm):
list_elms = feat_elm.xpath("""//reasonForChange |
//form |
//provenance |
//levelOfDetail""")
r = re.compile('#(.*)$')
for elm in list_elms:
matches = r.findall(elm.get('href'))
if len(matches):
elm.text = matches[0]
return feat_elm
class prep_emapsite_addressbase_premium(prep_osgml):
"""
Prepare emapsite OS AddressBase Premium GML output by FME
"""
def __init__(self, inputfile):
prep_osgml.__init__(self, inputfile)
# Looking at the sample data it doesn't appear as though the name of
# the AddressBaseT_Plus feature type is likely to be the same for each
        # supply so, as there is only one feature type, simply specify the
# containing featureMember
self.feat_types = ['featureMember']
def _prepare_feat_elm(self, feat_elm):
feat_elm = self._add_geom(feat_elm)
return feat_elm
def _add_geom(self, feat_elm):
""" Add a GML Point element to a feature with coordinates taken from
the x_coordinate and y_coordinate fields """
pos_elm = etree.SubElement(feat_elm, 'Pos')
pos_elm.text = '%s %s' % (feat_elm.findtext('.//x_coordinate'), feat_elm.findtext('.//y_coordinate'))
pnt_elm = etree.SubElement(feat_elm, 'Point')
pnt_elm.attrib['srsName'] = 'EPSG:27700'
pnt_elm.append(pos_elm)
# Append the Point element to the first child
list(feat_elm)[0].append(pnt_elm)
return feat_elm
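# Illustrative sketch (hypothetical values, not taken from real supply data): if the
# featureMember's first child contains <x_coordinate>531628</x_coordinate> and
# <y_coordinate>181281</y_coordinate>, _add_geom appends something like
#   <Point srsName="EPSG:27700"><Pos>531628 181281</Pos></Point>
# to that first child, so downstream GML consumers can pick up the geometry directly.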
class ObjectifyJSONEncoder(json.JSONEncoder):
""" JSON encoder that can handle simple lxml objectify types,
based on the original: https://gist.github.com/aisipos/345559, extended
to accommodate encoding child nodes with the same tag name as a list.
Usage:
>>> import json
>>> import lxml
>>> from lxml import objectify
>>> obj = objectify.fromstring("<author><name>W. Shakespeare</name><play>Twelfth Night</play><play>As You Like It</play></author>")
>>> json.dumps(obj, cls=ObjectifyJSONEncoder)
'{"play": ["Twelfth Night", "As You Like It"], "name": "W. Shakespeare"}'
"""
def default(self, o):
if isinstance(o, lxml.objectify.IntElement):
return int(o)
if isinstance(o, lxml.objectify.NumberElement) or isinstance(o, lxml.objectify.FloatElement):
return float(o)
if isinstance(o, lxml.objectify.ObjectifiedDataElement):
return str(o)
if hasattr(o, '__dict__'):
# objectify elements act like dicts to allow access to child nodes
# via their tag name. If an element has more than one child of the
# same name the dict only contains the first value against the tag
# name; to ensure all children are encoded create a list of child
# node values and assign it to the key that matches their tag name.
d = o.__dict__.copy()
for k in d.keys():
if len(d[k]) > 1:
d[k] = [i for i in d[k]]
return d
return json.JSONEncoder.default(self, o)
|
the-stack_106_29405 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for Superset"""
import json
import unittest
from random import random
from flask import escape
from sqlalchemy import func
from superset import db, security_manager
from superset.connectors.sqla.models import SqlaTable
from superset.models import core as models
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from .base_tests import SupersetTestCase
class DashboardTests(SupersetTestCase):
def __init__(self, *args, **kwargs):
super(DashboardTests, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def get_mock_positions(self, dash):
positions = {"DASHBOARD_VERSION_KEY": "v2"}
for i, slc in enumerate(dash.slices):
id = "DASHBOARD_CHART_TYPE-{}".format(i)
d = {
"type": "DASHBOARD_CHART_TYPE",
"id": id,
"children": [],
"meta": {"width": 4, "height": 50, "chartId": slc.id},
}
positions[id] = d
return positions
def test_dashboard(self):
self.login(username="admin")
urls = {}
for dash in db.session.query(Dashboard).all():
urls[dash.dashboard_title] = dash.url
for title, url in urls.items():
assert escape(title) in self.client.get(url).data.decode("utf-8")
def test_new_dashboard(self):
self.login(username="admin")
dash_count_before = db.session.query(func.count(Dashboard.id)).first()[0]
url = "/dashboard/new/"
resp = self.get_resp(url)
self.assertIn("[ untitled dashboard ]", resp)
dash_count_after = db.session.query(func.count(Dashboard.id)).first()[0]
self.assertEqual(dash_count_before + 1, dash_count_after)
def test_dashboard_modes(self):
self.login(username="admin")
dash = db.session.query(Dashboard).filter_by(slug="births").first()
url = dash.url
if dash.url.find("?") == -1:
url += "?"
else:
url += "&"
resp = self.get_resp(url + "edit=true&standalone=true")
self.assertIn("editMode": true", resp)
self.assertIn("standalone_mode": true", resp)
self.assertIn('<body class="standalone">', resp)
def test_save_dash(self, username="admin"):
self.login(username=username)
dash = db.session.query(Dashboard).filter_by(slug="births").first()
positions = self.get_mock_positions(dash)
data = {
"css": "",
"expanded_slices": {},
"positions": positions,
"dashboard_title": dash.dashboard_title,
}
url = "/superset/save_dash/{}/".format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
self.assertIn("SUCCESS", resp)
def test_save_dash_with_filter(self, username="admin"):
self.login(username=username)
dash = db.session.query(Dashboard).filter_by(slug="world_health").first()
positions = self.get_mock_positions(dash)
filters = {str(dash.slices[0].id): {"region": ["North America"]}}
default_filters = json.dumps(filters)
data = {
"css": "",
"expanded_slices": {},
"positions": positions,
"dashboard_title": dash.dashboard_title,
"default_filters": default_filters,
}
url = "/superset/save_dash/{}/".format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
self.assertIn("SUCCESS", resp)
updatedDash = db.session.query(Dashboard).filter_by(slug="world_health").first()
new_url = updatedDash.url
self.assertIn("region", new_url)
resp = self.get_resp(new_url)
self.assertIn("North America", resp)
def test_save_dash_with_invalid_filters(self, username="admin"):
self.login(username=username)
dash = db.session.query(Dashboard).filter_by(slug="world_health").first()
# add an invalid filter slice
positions = self.get_mock_positions(dash)
filters = {str(99999): {"region": ["North America"]}}
default_filters = json.dumps(filters)
data = {
"css": "",
"expanded_slices": {},
"positions": positions,
"dashboard_title": dash.dashboard_title,
"default_filters": default_filters,
}
url = "/superset/save_dash/{}/".format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
self.assertIn("SUCCESS", resp)
updatedDash = db.session.query(Dashboard).filter_by(slug="world_health").first()
new_url = updatedDash.url
self.assertNotIn("region", new_url)
def test_save_dash_with_dashboard_title(self, username="admin"):
self.login(username=username)
dash = db.session.query(Dashboard).filter_by(slug="births").first()
origin_title = dash.dashboard_title
positions = self.get_mock_positions(dash)
data = {
"css": "",
"expanded_slices": {},
"positions": positions,
"dashboard_title": "new title",
}
url = "/superset/save_dash/{}/".format(dash.id)
self.get_resp(url, data=dict(data=json.dumps(data)))
updatedDash = db.session.query(Dashboard).filter_by(slug="births").first()
self.assertEqual(updatedDash.dashboard_title, "new title")
# bring back dashboard original title
data["dashboard_title"] = origin_title
self.get_resp(url, data=dict(data=json.dumps(data)))
def test_save_dash_with_colors(self, username="admin"):
self.login(username=username)
dash = db.session.query(Dashboard).filter_by(slug="births").first()
positions = self.get_mock_positions(dash)
new_label_colors = {"data value": "random color"}
data = {
"css": "",
"expanded_slices": {},
"positions": positions,
"dashboard_title": dash.dashboard_title,
"color_namespace": "Color Namespace Test",
"color_scheme": "Color Scheme Test",
"label_colors": new_label_colors,
}
url = "/superset/save_dash/{}/".format(dash.id)
self.get_resp(url, data=dict(data=json.dumps(data)))
updatedDash = db.session.query(Dashboard).filter_by(slug="births").first()
self.assertIn("color_namespace", updatedDash.json_metadata)
self.assertIn("color_scheme", updatedDash.json_metadata)
self.assertIn("label_colors", updatedDash.json_metadata)
# bring back original dashboard
del data["color_namespace"]
del data["color_scheme"]
del data["label_colors"]
self.get_resp(url, data=dict(data=json.dumps(data)))
def test_copy_dash(self, username="admin"):
self.login(username=username)
dash = db.session.query(Dashboard).filter_by(slug="births").first()
positions = self.get_mock_positions(dash)
new_label_colors = {"data value": "random color"}
data = {
"css": "",
"duplicate_slices": False,
"expanded_slices": {},
"positions": positions,
"dashboard_title": "Copy Of Births",
"color_namespace": "Color Namespace Test",
"color_scheme": "Color Scheme Test",
"label_colors": new_label_colors,
}
# Save changes to Births dashboard and retrieve updated dash
dash_id = dash.id
url = "/superset/save_dash/{}/".format(dash_id)
self.client.post(url, data=dict(data=json.dumps(data)))
dash = db.session.query(Dashboard).filter_by(id=dash_id).first()
orig_json_data = dash.data
# Verify that copy matches original
url = "/superset/copy_dash/{}/".format(dash_id)
resp = self.get_json_resp(url, data=dict(data=json.dumps(data)))
self.assertEqual(resp["dashboard_title"], "Copy Of Births")
self.assertEqual(resp["position_json"], orig_json_data["position_json"])
self.assertEqual(resp["metadata"], orig_json_data["metadata"])
# check every attribute in each dashboard's slices list,
# exclude modified and changed_on attribute
for index, slc in enumerate(orig_json_data["slices"]):
for key in slc:
if key not in ["modified", "changed_on"]:
self.assertEqual(slc[key], resp["slices"][index][key])
def test_add_slices(self, username="admin"):
self.login(username=username)
dash = db.session.query(Dashboard).filter_by(slug="births").first()
new_slice = (
db.session.query(Slice).filter_by(slice_name="Energy Force Layout").first()
)
existing_slice = (
db.session.query(Slice).filter_by(slice_name="Girl Name Cloud").first()
)
data = {
"slice_ids": [new_slice.data["slice_id"], existing_slice.data["slice_id"]]
}
url = "/superset/add_slices/{}/".format(dash.id)
resp = self.client.post(url, data=dict(data=json.dumps(data)))
assert "SLICES ADDED" in resp.data.decode("utf-8")
dash = db.session.query(Dashboard).filter_by(slug="births").first()
new_slice = (
db.session.query(Slice).filter_by(slice_name="Energy Force Layout").first()
)
assert new_slice in dash.slices
assert len(set(dash.slices)) == len(dash.slices)
# cleaning up
dash = db.session.query(Dashboard).filter_by(slug="births").first()
dash.slices = [o for o in dash.slices if o.slice_name != "Energy Force Layout"]
db.session.commit()
def test_remove_slices(self, username="admin"):
self.login(username=username)
dash = db.session.query(Dashboard).filter_by(slug="births").first()
origin_slices_length = len(dash.slices)
positions = self.get_mock_positions(dash)
# remove one chart
chart_keys = []
for key in positions.keys():
if key.startswith("DASHBOARD_CHART_TYPE"):
chart_keys.append(key)
positions.pop(chart_keys[0])
data = {
"css": "",
"expanded_slices": {},
"positions": positions,
"dashboard_title": dash.dashboard_title,
}
# save dash
dash_id = dash.id
url = "/superset/save_dash/{}/".format(dash_id)
self.client.post(url, data=dict(data=json.dumps(data)))
dash = db.session.query(Dashboard).filter_by(id=dash_id).first()
# verify slices data
data = dash.data
self.assertEqual(len(data["slices"]), origin_slices_length - 1)
def test_public_user_dashboard_access(self):
table = db.session.query(SqlaTable).filter_by(table_name="birth_names").one()
# Make the births dash published so it can be seen
births_dash = db.session.query(Dashboard).filter_by(slug="births").one()
births_dash.published = True
db.session.merge(births_dash)
db.session.commit()
# Try access before adding appropriate permissions.
self.revoke_public_access_to_table(table)
self.logout()
resp = self.get_resp("/chart/list/")
self.assertNotIn("birth_names</a>", resp)
resp = self.get_resp("/dashboard/list/")
self.assertNotIn("/superset/dashboard/births/", resp)
self.grant_public_access_to_table(table)
# Try access after adding appropriate permissions.
self.assertIn("birth_names", self.get_resp("/chart/list/"))
resp = self.get_resp("/dashboard/list/")
self.assertIn("/superset/dashboard/births/", resp)
self.assertIn("Births", self.get_resp("/superset/dashboard/births/"))
# Confirm that public doesn't have access to other datasets.
resp = self.get_resp("/chart/list/")
self.assertNotIn("wb_health_population</a>", resp)
resp = self.get_resp("/dashboard/list/")
self.assertNotIn("/superset/dashboard/world_health/", resp)
def test_dashboard_with_created_by_can_be_accessed_by_public_users(self):
self.logout()
table = db.session.query(SqlaTable).filter_by(table_name="birth_names").one()
self.grant_public_access_to_table(table)
dash = db.session.query(Dashboard).filter_by(slug="births").first()
dash.owners = [security_manager.find_user("admin")]
dash.created_by = security_manager.find_user("admin")
db.session.merge(dash)
db.session.commit()
assert "Births" in self.get_resp("/superset/dashboard/births/")
def test_only_owners_can_save(self):
dash = db.session.query(Dashboard).filter_by(slug="births").first()
dash.owners = []
db.session.merge(dash)
db.session.commit()
self.test_save_dash("admin")
self.logout()
self.assertRaises(Exception, self.test_save_dash, "alpha")
alpha = security_manager.find_user("alpha")
dash = db.session.query(Dashboard).filter_by(slug="births").first()
dash.owners = [alpha]
db.session.merge(dash)
db.session.commit()
self.test_save_dash("alpha")
def test_owners_can_view_empty_dashboard(self):
dash = db.session.query(Dashboard).filter_by(slug="empty_dashboard").first()
if not dash:
dash = Dashboard()
dash.dashboard_title = "Empty Dashboard"
dash.slug = "empty_dashboard"
else:
dash.slices = []
dash.owners = []
db.session.merge(dash)
db.session.commit()
gamma_user = security_manager.find_user("gamma")
self.login(gamma_user.username)
resp = self.get_resp("/dashboard/list/")
self.assertNotIn("/superset/dashboard/empty_dashboard/", resp)
def test_users_can_view_published_dashboard(self):
table = db.session.query(SqlaTable).filter_by(table_name="energy_usage").one()
# get a slice from the allowed table
slice = db.session.query(Slice).filter_by(slice_name="Energy Sankey").one()
self.grant_public_access_to_table(table)
hidden_dash_slug = f"hidden_dash_{random()}"
published_dash_slug = f"published_dash_{random()}"
# Create a published and hidden dashboard and add them to the database
published_dash = Dashboard()
published_dash.dashboard_title = "Published Dashboard"
published_dash.slug = published_dash_slug
published_dash.slices = [slice]
published_dash.published = True
hidden_dash = Dashboard()
hidden_dash.dashboard_title = "Hidden Dashboard"
hidden_dash.slug = hidden_dash_slug
hidden_dash.slices = [slice]
hidden_dash.published = False
db.session.merge(published_dash)
db.session.merge(hidden_dash)
db.session.commit()
resp = self.get_resp("/dashboard/list/")
self.assertNotIn(f"/superset/dashboard/{hidden_dash_slug}/", resp)
self.assertIn(f"/superset/dashboard/{published_dash_slug}/", resp)
def test_users_can_view_own_dashboard(self):
user = security_manager.find_user("gamma")
my_dash_slug = f"my_dash_{random()}"
not_my_dash_slug = f"not_my_dash_{random()}"
# Create one dashboard I own and another that I don't
dash = Dashboard()
dash.dashboard_title = "My Dashboard"
dash.slug = my_dash_slug
dash.owners = [user]
dash.slices = []
hidden_dash = Dashboard()
hidden_dash.dashboard_title = "Not My Dashboard"
hidden_dash.slug = not_my_dash_slug
hidden_dash.slices = []
hidden_dash.owners = []
db.session.merge(dash)
db.session.merge(hidden_dash)
db.session.commit()
self.login(user.username)
resp = self.get_resp("/dashboard/list/")
self.assertIn(f"/superset/dashboard/{my_dash_slug}/", resp)
self.assertNotIn(f"/superset/dashboard/{not_my_dash_slug}/", resp)
def test_users_can_view_favorited_dashboards(self):
user = security_manager.find_user("gamma")
fav_dash_slug = f"my_favorite_dash_{random()}"
regular_dash_slug = f"regular_dash_{random()}"
favorite_dash = Dashboard()
favorite_dash.dashboard_title = "My Favorite Dashboard"
favorite_dash.slug = fav_dash_slug
regular_dash = Dashboard()
regular_dash.dashboard_title = "A Plain Ol Dashboard"
regular_dash.slug = regular_dash_slug
db.session.merge(favorite_dash)
db.session.merge(regular_dash)
db.session.commit()
dash = db.session.query(Dashboard).filter_by(slug=fav_dash_slug).first()
favorites = models.FavStar()
favorites.obj_id = dash.id
favorites.class_name = "Dashboard"
favorites.user_id = user.id
db.session.merge(favorites)
db.session.commit()
self.login(user.username)
resp = self.get_resp("/dashboard/list/")
self.assertIn(f"/superset/dashboard/{fav_dash_slug}/", resp)
def test_user_can_not_view_unpublished_dash(self):
admin_user = security_manager.find_user("admin")
gamma_user = security_manager.find_user("gamma")
slug = f"admin_owned_unpublished_dash_{random()}"
# Create a dashboard owned by admin and unpublished
dash = Dashboard()
dash.dashboard_title = "My Dashboard"
dash.slug = slug
dash.owners = [admin_user]
dash.slices = []
dash.published = False
db.session.merge(dash)
db.session.commit()
# list dashboards as a gamma user
self.login(gamma_user.username)
resp = self.get_resp("/dashboard/list/")
self.assertNotIn(f"/superset/dashboard/{slug}/", resp)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_29406 | #!/usr/bin/env python3
import sys
from collections import deque
def is_safe(grid, x, y, distances):
return x >= 0 and x < len(grid) and y >= 0 and y < len(grid) and distances[x][y] == -1 and grid[x][y] != 'X'
def get_safe_moves(grid, node, distances):
directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
variants = []
for di in directions:
nunode = (node[0] + di[0], node[1] + di[1])
while is_safe(grid, nunode[0], nunode[1], distances):
variants.append(nunode)
nunode = (nunode[0] + di[0], nunode[1] + di[1])
return variants
def minimumMoves(grid, startX, startY, goalX, goalY):
next_to_visit = deque()
node = (startX, startY)
next_to_visit.appendleft(node)
distances = [[-1]*len(grid) for _ in range(len(grid))]
distances[startX][startY] = 0
while next_to_visit:
node = next_to_visit.pop()
#print("point = ({}, {})".format(node[0], node[1]))
#for row in distances:
# print(row)
#print()
height = distances[node[0]][node[1]]
variants = get_safe_moves(grid, node, distances)
for var in variants:
if var == (goalX, goalY):
return height + 1
distances[var[0]][var[1]] = height + 1
next_to_visit.appendleft(var)
return -1
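# A small worked example (the classic "castle on the grid" sample). For the grid
#   .X.
#   .X.
#   ...
# with start (0, 0) and goal (0, 2), the piece slides down, right, then up,
# so minimumMoves returns 3. The equivalent stdin for the __main__ block below is:
#   3
#   .X.
#   .X.
#   ...
#   0 0 0 2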
if __name__ == "__main__":
n = int(input().strip())
grid = []
for _ in range(n):
layer = list(input().strip())
grid.append(layer)
startX, startY, goalX, goalY = [int(i) for i in input().strip().split()]
result = minimumMoves(grid, startX, startY, goalX, goalY)
print(result)
|
the-stack_106_29407 | from __future__ import absolute_import
from __future__ import print_function
import math
import numpy as np
import unittest
import ray
import cython_examples as cyth
def get_ray_result(cython_func, *args):
func = ray.remote(cython_func)
return ray.get(func.remote(*args))
class CythonTest(unittest.TestCase):
def setUp(self):
ray.init(object_store_memory=int(150 * 1024 * 1024))
def tearDown(self):
ray.shutdown()
def assertEqualHelper(self, cython_func, expected, *args):
assert get_ray_result(cython_func, *args) == expected
def test_simple_func(self):
self.assertEqualHelper(cyth.simple_func, 6, 1, 2, 3)
self.assertEqualHelper(cyth.fib, 55, 10)
self.assertEqualHelper(cyth.fib_int, 55, 10)
self.assertEqualHelper(cyth.fib_cpdef, 55, 10)
self.assertEqualHelper(cyth.fib_cdef, 55, 10)
def test_simple_class(self):
cls = ray.remote(cyth.simple_class)
a1 = cls.remote()
a2 = cls.remote()
result1 = ray.get(a1.increment.remote())
result2 = ray.get(a2.increment.remote())
result3 = ray.get(a2.increment.remote())
assert result1 == 1
assert result2 == 1
assert result3 == 2
def test_numpy(self):
array = np.array([-1.0, 0.0, 1.0, 2.0])
answer = [float("-inf") if x <= 0 else math.log(x) for x in array]
result = get_ray_result(cyth.masked_log, array)
np.testing.assert_array_equal(answer, result)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
the-stack_106_29408 | from django.shortcuts import render
from index.models import *
from django.http import StreamingHttpResponse
# Create your views here.
def playView(request,song_id):
# Hot search songs
search_song = Dynamic.objects.select_related('song').order_by('-dynamic_search').all()[:6]
# Song info
song_info = Song.objects.get(song_id=int(song_id))
# Playlist
play_list = request.session.get('play_list', [])
song_exist = False
if play_list:
for i in play_list:
if int(song_id) == i['song_id']:
song_exist = True
if song_exist == False:
play_list.append(
{'song_id': int(song_id), 'song_singer': song_info.song_singer, 'song_name': song_info.song_name,
'song_time': song_info.song_time})
request.session['play_list'] = play_list
# Lyrics
if song_info.song_lyrics != '暂无歌词':
f = open('static/songLyric/' + song_info.song_lyrics, 'r', encoding='utf-8')
song_lyrics = f.read()
f.close()
# Related songs
song_type = Song.objects.values('song_type').get(song_id=song_id)['song_type']
song_relevant = Dynamic.objects.select_related('song').filter(song__song_type=song_type).order_by(
'-dynamic_plays').all()[:6]
# Increment the play count
# Possible extension: use the session so each visitor only adds one play per day
dynamic_info = Dynamic.objects.filter(song_id=int(song_id)).first()
# If dynamic info for this song already exists, increment its play count by 1
if dynamic_info:
dynamic_info.dynamic_plays += 1
dynamic_info.save()
# Otherwise create a new dynamic info record
else:
dynamic_info = Dynamic(dynamic_plays=1, dynamic_search=0, dynamic_down=0, song_id=song_id)
dynamic_info.save()
return render(request, 'play.html', locals())
# Song download
def downloadView(request, song_id):
# Look up the song info by song_id
song_info = Song.objects.get(song_id=int(song_id))
# Increment the download count
dynamic_info = Dynamic.objects.filter(song_id=int(song_id)).first()
# If dynamic info for this song already exists, increment its download count by 1
if dynamic_info:
dynamic_info.dynamic_down += 1
dynamic_info.save()
# Otherwise create a new dynamic info record
else:
dynamic_info = Dynamic(dynamic_plays=0,dynamic_search=0,dynamic_down=1,song_id=song_id)
dynamic_info.save()
# Read the file contents
file = 'static/songFile/' + song_info.song_file
def file_iterator(file, chunk_size=512):
with open(file, 'rb') as f:
while True:
c = f.read(chunk_size)
if c:
yield c
else:
break
# Write the file contents into a StreamingHttpResponse and return it as a byte stream so the user can download the file
filename = str(song_id) + '.mp3'
response = StreamingHttpResponse(file_iterator(file))
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment; filename="%s"' %(filename)
return response |
the-stack_106_29409 | import os
import re
import shlex
import stat
import winreg
from collections import defaultdict
from contextlib import contextmanager
from dataclasses import dataclass
from datetime import date, datetime
from pathlib import Path
from typing import Optional, Generator, Any, Tuple, Callable, Mapping, ContextManager, Collection
import plumbum
DEFAULT_SEARCH_FLAGS = re.IGNORECASE
@dataclass
class InstalledApplication:
name: str
version: Optional[str] = None
install_date: Optional[date] = None
install_location: Optional[Path] = None
install_source: Optional[Path] = None
modify_path: Optional[str] = None
publisher: Optional[str] = None
uninstall_string: Optional[str] = None
def modify(self, *args) -> None:
command = _command(self.modify_path, *args)
command()
def uninstall(self, *args) -> None:
command = _command(self.uninstall_string, *args)
command()
def list_installed() -> Generator[InstalledApplication, None, None]:
application_or_none_generator = (_installed_application(application_key)
for application_key in _installed_application_keys())
return (application for application in application_or_none_generator if application is not None)
def search_installed(name: Optional[str] = None,
*,
search_flags: int = DEFAULT_SEARCH_FLAGS,
**search_fields) -> Generator[InstalledApplication, None, None]:
patterns = {key: value for key, value in {'name': name, **search_fields}.items() if value}
for app in list_installed():
matches = True
for field_name, pattern in patterns.items():
if not re.search(pattern=pattern, string=str(getattr(app, field_name, '')), flags=search_flags):
matches = False
break
if matches:
yield app
def uninstall(name: Optional[str] = None,
args: Optional[Collection] = None,
*,
search_flags: int = DEFAULT_SEARCH_FLAGS,
**search_fields) -> None:
while True:
try:
app = next(search_installed(name=name, search_flags=search_flags, **search_fields))
except StopIteration:
break
if args is None:
args = []
app.uninstall(*args)
@contextmanager
def uninstalled(name: Optional[str] = None,
args: Optional[Collection] = None,
*,
search_flags: int = DEFAULT_SEARCH_FLAGS,
**search_fields) -> ContextManager[None]:
uninstall_kwargs = {
'name': name,
'args': args,
'search_flags': search_flags,
**search_fields,
}
uninstall(**uninstall_kwargs)
try:
yield
finally:
uninstall(**uninstall_kwargs)
_ROOT_KEY = winreg.HKEY_LOCAL_MACHINE
_VALUE_NOT_SET = '(value not set)'
def _none_on_value_not_set(value: Any) -> Any:
return value if value != _VALUE_NOT_SET else None
_REGISTRY_KEY_TO_APPLICATION_FIELD_DICT: Mapping[str, Optional[Callable]] = defaultdict(lambda: None, **{
'DisplayName': lambda value: ('name', _none_on_value_not_set(value)),
'DisplayVersion': lambda value: ('version', _none_on_value_not_set(str(value))),
'InstallDate': lambda value: ('install_date', _none_on_value_not_set(value) and _date_check(value)),
'InstallLocation': lambda value: ('install_location', _none_on_value_not_set(value) and Path(value)),
'InstallSource': lambda value: ('install_source', _none_on_value_not_set(value) and Path(value)),
'ModifyPath': lambda value: ('modify_path', _none_on_value_not_set(value)),
'Publisher': lambda value: ('publisher', _none_on_value_not_set(value)),
'UninstallString': lambda value: ('uninstall_string', _none_on_value_not_set(value)),
})
def _date_check(date):
date = str(date)
if len(date) == 10 and date.isdigit(): # 1577836800 (Timestamp format)
return datetime.fromtimestamp(int(date)).date()
elif len(date) == 8 and date.isdigit(): # 20200101 (YMD no separator format)
return datetime.strptime(date[:8], "%Y%m%d").date()
elif "/" in date: # 1/1/2020 (MDY non leading zero format)
filled = [x.zfill(2) for x in date.split('/')]
return datetime.strptime("".join(filled), "%m%d%Y").date()
def _installed_application_keys() -> Generator[str, None, None]:
uninstall_keys = [
r'Software\Microsoft\Windows\CurrentVersion\Uninstall',
r'Software\WOW6432Node\Microsoft\Windows\CurrentVersion\Uninstall'
]
for uninstall_key in uninstall_keys:
uninstall_opened_key = winreg.OpenKey(_ROOT_KEY, uninstall_key)
i = 0
while True:
try:
application = winreg.EnumKey(uninstall_opened_key, i)
yield f'{uninstall_key}\\{application}'
i += 1
except OSError:
break
def _installed_application_registry_values(application_key: str) -> Generator[Tuple[str, Any, int], None, None]:
application_opened_key = winreg.OpenKey(_ROOT_KEY, application_key)
i = 0
while True:
try:
data = winreg.EnumValue(application_opened_key, i)
yield data
i += 1
except OSError:
return
def _installed_application(application_key: str) -> Optional[InstalledApplication]:
def skip() -> bool:
def guid_to_squid(guid: str) -> str:
"""Taken from salt.utils.win_functions"""
guid_pattern = re.compile(r'^\{(\w{8})-(\w{4})-(\w{4})-(\w\w)(\w\w)-(\w\w)(\w\w)(\w\w)(\w\w)(\w\w)(\w\w)\}$')
guid_match = guid_pattern.match(guid)
# noinspection PyShadowingNames
result = ''
if guid_match is not None:
for index in range(1, 12):
result += guid_match.group(index)[::-1]
return result
def key_exists(key: int, sub_key: str) -> bool:
try:
winreg.OpenKey(key, sub_key)
return True
except FileNotFoundError:
return False
is_system_component = name == 'SystemComponent' and int(value or 0) > 0
is_win_installer_absent_in_products = False
if name == 'WindowsInstaller' and int(value or 0) > 0:
squid = guid_to_squid(application_key.rpartition('\\')[2])
products_key = r'Software\Classes\Installer\Products' + '\\' + squid
if not key_exists(_ROOT_KEY, products_key):
is_win_installer_absent_in_products = True
is_update = ((name == 'ReleaseType' and value not in ['Hotfix', 'Security Update', 'Update Rollup'])
or (name == 'ParentKeyName'))
is_win_update = name == 'DisplayName' and bool(re.match(r'^KB[0-9]{6}', value))
# noinspection PyShadowingNames
result = is_system_component or is_win_installer_absent_in_products or is_update or is_win_update
return result
result = InstalledApplication(name='')
for data in _installed_application_registry_values(application_key):
name, value, type_ = data
if skip():
return None
f = _REGISTRY_KEY_TO_APPLICATION_FIELD_DICT[name]
if f is not None:
setattr(result, *f(value))
if not result.name:
return None
return result
def _command(command_str: str, *args) -> plumbum.commands.BaseCommand:
def is_executable(path) -> bool:
return command_path.is_file() and bool(stat.S_IMODE(path.stat().st_mode) & os.X_OK)
if command_str.startswith(("'", '"')):
command_list = shlex.split(command_str, posix=False)
command_path = command_list[0][1:-1]
command_args = command_list[1:]
else:
space_index = command_str.find(' ')
command_path = Path(command_str[:space_index])
while not is_executable(command_path) and space_index > -1:
space_index = command_str.find(' ', space_index + 1)
command_path = Path(command_str[:(space_index if space_index > -1 else len(command_str))])
command_args = shlex.split(command_str[space_index + 1:], posix=False) if space_index > -1 else []
command_args += args
return plumbum.local[str(command_path)][command_args]
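# A minimal usage sketch (assumes a Windows host with the usual uninstall registry
# keys; "Some App" and run_installer_test() are hypothetical):
#
#   for app in search_installed("Some App"):
#       print(app.name, app.version, app.uninstall_string)
#
#   # pass extra arguments (e.g. a silent-install flag) straight to the uninstaller
#   uninstall("Some App", args=["/S"])
#
#   # temporarily guarantee the app is absent, e.g. around an installer test
#   with uninstalled("Some App"):
#       run_installer_test()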
|
the-stack_106_29410 | '''Note: this module may deadlock.'''
from ..fakepath import fakepath_abs , new_fakefolder
import os
import random
import os.path as P
my_id = "{pid}_{seed}".format( pid = os.getpid() , seed = random.randint(0,233333)) #进程相关的id
name = "YFILELOCK/"
new_fakefolder(name)
def get_path(foldername):
'''Given the lock folder name, create the corresponding folder.'''
pt = fakepath_abs(P.join(name , foldername))
new_fakefolder(pt)
return pt
def checkfile(path , my_filename):
'''The argument is an absolute path; check whether it contains any .lock file other than our own. Returns True if there is none.'''
return len( [ x for x in os.listdir(path) if x.endswith(".lock") and x != my_filename] ) == 0
def acquire_lock(foldername , my_name = "0"):
my_filename = "{my_id}_{my_name}.lock".format(my_id = my_id , my_name = my_name) #生成自己的锁文件的文件名
path = get_path(foldername) #锁目录
my_file = P.join(path , my_filename) #自己的锁文件的文件路径
while True:
# wait for other processes to release their locks
while not checkfile(path , my_filename): pass
# place our own lock
open(my_file , "w").close()
if not checkfile(path , my_filename): # if locks other than ours are still present
os.remove(my_file) # remove our own lock
continue # and check again
break # lock acquired
def release_lock(foldername , my_name = "0"):
my_filename = "{my_id}_{my_name}.lock".format(my_id = my_id , my_name = my_name) #生成自己的锁文件的文件名
path = get_path(foldername) #锁目录
my_file = P.join(path , my_filename) #自己的锁文件的文件名
os.remove(my_file) # 删掉自己的锁
class FileLock:
'''Lock a file (directory).'''
def __init__(self , foldername , threadname = None):
if threadname is None:
threadname = str( random.randint(0,233333) ) #generate a thread-specific random number
self.foldername = foldername
self.threadname = threadname
def __enter__(self):
acquire_lock(self.foldername , self.threadname)
return self
def __exit__(self , *args , **kwargs):
release_lock(self.foldername , self.threadname)
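# A minimal usage sketch (assuming the surrounding package provides the fakepath
# helpers imported above). The lock is purely cooperative, so every participating
# process must agree on the same folder name:
#
#   with FileLock("my_shared_resource"):
#       update_shared_file()  # hypothetical critical section
#
# acquire_lock("my_shared_resource") / release_lock("my_shared_resource") can be
# called directly when a context manager does not fit.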
|
the-stack_106_29412 | from rl_agents.policies.gaussian import GaussianActor, ContinuousSample
from tensorflow_probability import distributions
import tensorflow as tf
import numpy as np
class LayerTest(tf.test.TestCase):
def setUp(self):
super(LayerTest, self).setUp()
self.layer = ContinuousSample((3,))
def testShape(self):
x = np.array([
[0.5, 1.3, 3.6],
[1.0, 2.0, 0.1],
]).astype(np.float32)
pi, logp_pi, dist, inputs = self.layer(x)
# print(inputs, x)
self.assertShapeEqual(np.zeros((2,3)), pi) # should be 0s and 1s
self.assertShapeEqual(np.zeros((2,)), logp_pi)
self.assertIsInstance(dist, distributions.Normal)
self.assertAllEqual(inputs, x)
if __name__ == '__main__':
tf.test.main()
|
the-stack_106_29414 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""TsFeatures is a module for performing adhoc feature engineering on time series
data using different statistics.
The module processes time series data into features for machine learning models.
We include seasonality, autocorrelation, modeling parameters, changepoints,
moving statistics, and raw statistics of the time series array as the adhoc features.
You can compute a subset of the features or feature groups via the
selected_features argument, and you can disable a feature or feature group
by setting feature_name/feature_group_name = False. You can find
all feature group names in the feature_group_mapping attribute.
"""
import inspect
import logging
from functools import partial
from itertools import groupby
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import statsmodels.api as sm
from deprecated import deprecated
from scipy import stats
from scipy.signal import periodogram # @manual
from statsmodels.stats.diagnostic import het_arch
from statsmodels.tsa.seasonal import STL
from statsmodels.tsa.stattools import acf, pacf, kpss
try:
from numba import jit # @manual
except ImportError:
logging.warning("numba is not installed. jit compilation of tsfeatures is disabled")
def jit(**kwargs): # type: ignore
def jit_decorator(func): # type: ignore
return func
return jit_decorator
from kats.compat.statsmodels import ExponentialSmoothing
from kats.consts import TimeSeriesData
from kats.detectors import (
cusum_detection,
bocpd,
robust_stat_detection,
outlier,
trend_mk,
seasonality,
)
"""
Each entry in _ALL_TS_FEATURES is of the form
```
(method, params)
```
where `get_{method}` is the name of a method on TsFeatures that computes some
features, and `params` is a dictionary of `{name: val}` pairs, where `name` is
the name of an argument to the method, and `val` is the name of an attribute on
the TsFeatures instance to pass as the value to that argument. For example,
```
("stl_features", {
"period": "stl_period",
}),
```
is transformed into code like
```
if self.stl_features:
features = self.get_stl_features(x, extra_args=self.__kwargs__,
default_status=self.default,
period=self.stl_period)
)
else:
features = {}
```
in `TsFeatures._transform_1d()`. Notice that the first three arguments
(`x`, `extra_args`, `default_status`) are always passed to all methods.
`_transform_1d` passes as the first parameter either the time series data
instance or the numpy array of values, based on whether the method takes
`ts` or `x` as its first parameter, respectively.
"""
_ALL_TS_FEATURES: List[Tuple[str, Dict[str, str]]] = [
("statistics", {"dict_features": "statistics_features"}),
("stl_features", {"period": "stl_period"}),
("level_shift_features", {"window_size": "window_size"}),
("acfpacf_features", {"acfpacf_lag": "acfpacf_lag", "period": "stl_period"}),
("special_ac", {}),
("holt_params", {}),
("hw_params", {"period": "stl_period"}),
("cusum_detector", {}),
("robust_stat_detector", {}),
("bocp_detector", {}),
("outlier_detector", {"decomp": "decomp", "iqr_mult": "iqr_mult"}),
("trend_detector", {"threshold": "threshold"}),
("nowcasting", {"window": "window", "n_fast": "n_fast", "n_slow": "n_slow"}),
("seasonalities", {}),
("time", {}),
]
_FEATURE_GROUP_MAPPING: Dict[str, List[str]] = {
"stl_features": [
"trend_strength",
"seasonality_strength",
"spikiness",
"peak",
"trough",
],
"level_shift_features": [
"level_shift_idx",
"level_shift_size",
],
"acfpacf_features": [
"y_acf1",
"y_acf5",
"diff1y_acf1",
"diff1y_acf5",
"diff2y_acf1",
"diff2y_acf5",
"y_pacf5",
"diff1y_pacf5",
"diff2y_pacf5",
"seas_acf1",
"seas_pacf1",
],
"special_ac": [
"firstmin_ac",
"firstzero_ac",
],
"holt_params": [
"holt_alpha",
"holt_beta",
],
"hw_params": [
"hw_alpha",
"hw_beta",
"hw_gamma",
],
"statistics": [
"length",
"mean",
"var",
"entropy",
"lumpiness",
"stability",
"flat_spots",
"hurst",
"std1st_der",
"crossing_points",
"binarize_mean",
"unitroot_kpss",
"heterogeneity",
"histogram_mode",
"linearity",
],
"cusum_detector": [
"cusum_num",
"cusum_conf",
"cusum_cp_index",
"cusum_delta",
"cusum_llr",
"cusum_regression_detected",
"cusum_stable_changepoint",
"cusum_p_value",
],
"robust_stat_detector": [
"robust_num",
"robust_metric_mean",
],
"bocp_detector": [
"bocp_num",
"bocp_conf_max",
"bocp_conf_mean",
],
"outlier_detector": [
"outlier_num",
],
"trend_detector": [
"trend_num",
"trend_num_increasing",
"trend_avg_abs_tau",
],
"nowcasting": [
"nowcast_roc",
"nowcast_ma",
"nowcast_mom",
"nowcast_lag",
"nowcast_macd",
"nowcast_macdsign",
"nowcast_macddiff",
],
"seasonalities": [
"seasonal_period",
"trend_mag",
"seasonality_mag",
"residual_std",
],
"time": [
"time_years",
"time_months",
"time_monthsofyear",
"time_weeks",
"time_weeksofyear",
"time_days",
"time_daysofyear",
"time_avg_timezone_offset",
"time_length_days",
"time_freq_Monday",
"time_freq_Tuesday",
"time_freq_Wednesday",
"time_freq_Thursday",
"time_freq_Friday",
"time_freq_Saturday",
"time_freq_Sunday",
],
}
TSMethod = Callable[[TimeSeriesData], Dict[str, float]]
ArrayMethod = Callable[[np.ndarray], Dict[str, float]]
class TsFeatures:
"""Process time series data into features for machine learning models.
Attributes:
window_size: int; Length of the sliding window for getting level shift
features, lumpiness, and stability of time series.
spectral_freq: int; Frequency parameter in getting periodogram through
scipy for calculating Shannon entropy.
stl_period: int; Period parameter for performing seasonality trend
decomposition using LOESS with statsmodels.
nbins: int; Number of bins to equally segment time series array for
getting flat spot feature.
lag_size: int; Maximum number of lag values for calculating Hurst Exponent.
acfpacf_lag: int; Largest lag number for returning ACF/PACF features
via statsmodels.
decomp: str; Additive or Multiplicative mode for performing outlier
detection using Kats.Detectors.outlier.OutlierDetector.
iqr_mult: float; IQR range for determining outliers through
Kats.Detectors.outlier.OutlierDetector.
threshold: float; threshold for trend intensity; higher threshold gives
trend with high intensity (0.8 by default). If we only want to use
the p-value to determine changepoints, set threshold = 0.
window: int; length of window for all nowcasting features.
n_fast: int; length of "fast" or short period exponential moving average
in the MACD algorithm in the nowcasting features.
n_slow: int; length of "slow" or long period exponential moving average
in the MACD algorithm in the nowcasting features.
selected_features: None or List[str]; list of feature/feature group name(s)
selected to be calculated. We will try only calculating selected
features, since some features are bundled in the calculations. This
process helps with boosting efficiency, and we will only output
selected features.
feature_group_mapping: The dictionary with the mapping from individual
features to their bundled feature groups.
final_filter: A dictionary with boolean values used to filter out
features that were calculated (because of feature bundling) but not selected.
stl_features: Switch for calculating/outputting stl features.
level_shift_features: Switch for calculating/outputting level shift features.
acfpacf_features: Switch for calculating/outputting ACF/PACF features.
special_ac: Switch for calculating/outputting special autocorrelation features (firstmin_ac, firstzero_ac).
holt_params: Switch for calculating/outputting holt parameter features.
hw_params: Switch for calculating/outputting holt-winters parameter features.
statistics: Switch for calculating/outputting raw statistics features.
cusum_detector: Switch for calculating/outputting features using cusum
detector in Kats.
robust_stat_detector: Switch for calculating/outputting features using
robust stat detector in Kats.
bocp_detector: Switch for calculating/outputting features using
bocp detector in Kats.
outlier_detector: Switch for calculating/outputting features using
outlier detector in Kats.
trend_detector: Switch for calculating/outputting features using
trend detector in Kats.
nowcasting: Switch for calculating/outputting features using
the nowcasting module in Kats.
seasonalities: Switch for calculating/outputting seasonality features
(seasonal period, trend/seasonality magnitude, residual std).
time: Switch for calculating/outputting time features.
default: The default status of the switch for opt-in/out feature calculations.
"""
_total_feature_len_: int = 0
_ts_methods: Dict[str, TSMethod] = {}
_x_methods: Dict[str, ArrayMethod] = {}
def __init__(
self,
window_size: int = 20,
spectral_freq: int = 1,
stl_period: int = 7,
nbins: int = 10,
lag_size: int = 30,
acfpacf_lag: int = 6,
decomp: str = "additive",
iqr_mult: float = 3.0,
threshold: float = 0.8,
window: int = 5,
n_fast: int = 12,
n_slow: int = 21,
selected_features: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
# init hyper-parameters
self.window_size = window_size
self.spectral_freq = spectral_freq
self.stl_period = stl_period
self.nbins = nbins
self.lag_size = lag_size
self.acfpacf_lag = acfpacf_lag
self.decomp = decomp
self.iqr_mult = iqr_mult
self.threshold = threshold
self.window = window
self.n_fast = n_fast
self.n_slow = n_slow
# Mapping group features
g2f = dict(_FEATURE_GROUP_MAPPING)
self.feature_group_mapping: Dict[str, List[str]] = dict(g2f)
f2g = self._compute_f2g(kwargs, g2f)
# Higher level of features:
# Once disabled, won't even go inside these groups of features
# for calculation
final_filter, default = self._compute_final_filter(
selected_features, f2g, g2f, kwargs
)
self.final_filter: Dict[str, bool] = final_filter
self._set_defaults(kwargs, default)
self._setup(spectral_freq, window_size, nbins, lag_size)
self._compile_methods()
def _compute_f2g(
self, kwargs: Dict[str, Any], g2f: Dict[str, List[str]]
) -> Dict[str, str]:
f2g = {}
for k, v in g2f.items():
for f in v:
f2g[f] = k
self._total_feature_len_ = len(f2g)
for f in kwargs.keys():
if not (f in f2g.keys() or f in g2f.keys()):
msg = (
f"couldn't find your desired feature/group '{f}', please "
"check spelling"
)
logging.error(msg)
raise ValueError(msg)
return f2g
def _compute_final_filter(
self,
selected_features: Optional[List[str]],
f2g: Dict[str, str],
g2f: Dict[str, List[str]],
kwargs: Dict[str, Any],
) -> Tuple[Dict[str, bool], bool]:
default = not selected_features
final_filter = {k: default for k in f2g.keys()}
if selected_features:
for f in selected_features:
if not (f in f2g.keys() or f in g2f.keys()):
msg = (
f"couldn't find your desired feature/group '{f}', please "
"check spelling"
)
logging.error(msg)
raise ValueError(msg)
if f in g2f.keys(): # the opt-in request is for a feature group
kwargs[f] = True
for feature in g2f[f]:
kwargs[feature] = kwargs.get(feature, True)
final_filter[feature] = True
elif f in f2g.keys(): # the opt-in request is for a certain feature
if not kwargs.get(f2g[f], True):
msg = (
f"feature group: {f2g[f]} has to be opt-in based on "
f"your opt-in request of feature: {f}"
)
logging.error(msg)
raise ValueError(msg)
if not kwargs.get(f, True):
msg = f"requested to both opt-in and opt-out feature: {f}"
logging.error(msg)
raise ValueError(msg)
kwargs[f2g[f]] = True # need to opt-in the feature group first
kwargs[f] = True # opt-in the feature
final_filter[f] = True
# final filter for filtering out features user didn't request and
# keep only the requested ones
final_filter.update(kwargs)
return final_filter, default
def _set_defaults(self, kwargs: Dict[str, Any], default: bool) -> None:
# setting default value for the switches of calculating the group of features
self.stl_features = kwargs.get("stl_features", default)
self.level_shift_features = kwargs.get("level_shift_features", default)
self.acfpacf_features = kwargs.get("acfpacf_features", default)
self.special_ac = kwargs.get("special_ac", default)
self.holt_params = kwargs.get("holt_params", default)
self.hw_params = kwargs.get("hw_params", default)
self.statistics = kwargs.get("statistics", default)
self.cusum_detector = kwargs.get("cusum_detector", False)
self.robust_stat_detector = kwargs.get("robust_stat_detector", False)
self.bocp_detector = kwargs.get("bocp_detector", False)
self.outlier_detector = kwargs.get("outlier_detector", False)
self.trend_detector = kwargs.get("trend_detector", False)
self.nowcasting = kwargs.get("nowcasting", False)
self.seasonalities = kwargs.get("seasonalities", False)
self.time = kwargs.get("time", False)
# For lower level of the features
self.__kwargs__ = kwargs
self.default = default
def _setup(
self, spectral_freq: int, window_size: int, nbins: int, lag_size: int
) -> None:
self.statistics_features = {
"length": TsFeatures.get_length,
"mean": TsFeatures.get_mean,
"var": TsFeatures.get_var,
"entropy": partial(TsFeatures.get_spectral_entropy, freq=spectral_freq),
"lumpiness": partial(TsFeatures.get_lumpiness, window_size=window_size),
"stability": partial(TsFeatures.get_stability, window_size=window_size),
"flat_spots": partial(TsFeatures.get_flat_spots, nbins=nbins),
"hurst": partial(TsFeatures.get_hurst, lag_size=lag_size),
"std1st_der": TsFeatures.get_std1st_der,
"crossing_points": TsFeatures.get_crossing_points,
"binarize_mean": TsFeatures.get_binarize_mean,
"unitroot_kpss": TsFeatures.get_unitroot_kpss,
"heterogeneity": TsFeatures.get_het_arch,
"histogram_mode": partial(TsFeatures.get_histogram_mode, nbins=nbins),
"linearity": TsFeatures.get_linearity,
}
def _compile_methods(self) -> None:
"""Map method names to method instances for _transform_1d."""
for method, _ in _ALL_TS_FEATURES:
method_name = f"get_{method}"
func = vars(TsFeatures).get(method_name, None)
assert func is not None, (
"Internal error: ",
f"TsFeatures.{method_name} does not exist",
)
if isinstance(func, staticmethod):
func = getattr(TsFeatures, method_name)
else:
func = getattr(self, method_name)
assert func is not None
if "x" in inspect.signature(func).parameters:
methods = self._x_methods
else:
methods = self._ts_methods
methods[method] = partial(
func, extra_args=self.__kwargs__, default_status=self.default
)
def transform(
self, x: TimeSeriesData
) -> Union[Dict[str, float], List[Dict[str, float]]]:
"""
The overall high-level function for transforming
time series into a number of features
Args:
x: Kats TimeSeriesData object.
Returns:
Returning maps (dictionary) with feature name and value pair.
For univariate input return a map of {feature: value}.
For multivariate input return a list of maps.
"""
if len(x) < 5:
msg = "Length of time series is too short to calculate features"
logging.error(msg)
raise ValueError(msg)
if type(x.value.values) != np.ndarray:
logging.warning(
"expecting values to be a np.ndarray, instead got "
f"{type(x.value.values)}"
)
# make sure that values are numpy array for feeding to Numba
df = pd.DataFrame(
{"time": x.time.values, "value": np.array(x.value.values, dtype=float)}
)
x = TimeSeriesData(df)
if len(x.value.shape) == 1:
# a single Series: return a map of {feature: value}
ts_values = x.value.values
ts_features = self._transform_1d(ts_values, x)
else:
# multiple time series: return a list of map {feature: value}
ts_features = []
for col in x.value.columns:
ts_values = x.value[col].values # extract 1-d numpy array
ts_features.append(self._transform_1d(ts_values, x.value[col]))
# performing final filter
to_remove = []
for feature in ts_features:
if not self.final_filter[feature]:
to_remove.append(feature)
for r in to_remove:
del ts_features[r]
return ts_features
def _transform_1d(self, x: np.ndarray, ts: TimeSeriesData) -> Dict[str, float]:
"""
Transform single (univariate) time series
Args:
x: The univariate time series array in the form of 1d numpy array.
ts: The univariate time series array.
Returns:
The dictionary with all the features aggregated from the outputs of
each feature group calculator.
"""
features = {}
for method, params in _ALL_TS_FEATURES:
if getattr(self, method, False):
logging.info(f"Generating {method} features...")
params = {name: getattr(self, val) for name, val in params.items()}
func = self._x_methods.get(method, None)
if func is None:
func = self._ts_methods[method]
assert func is not None
more_features: Dict[str, float] = func(ts, **params)
else:
more_features: Dict[str, float] = func(x, **params)
logging.debug(f"...generated {more_features}")
features.update(more_features)
return features
# length
@staticmethod
@jit(nopython=True)
def get_length(x: np.ndarray) -> float:
"""
Getting the length of time series array.
Args:
x: The univariate time series array in the form of 1d numpy array.
Returns:
Length of the time series array.
"""
return len(x)
# mean
@staticmethod
@jit(nopython=True)
def get_mean(x: np.ndarray) -> float:
"""
Getting the average value of time series array.
Args:
x: The univariate time series array in the form of 1d numpy array.
Returns:
Average of the time series array.
"""
return np.mean(x)
# variance
@staticmethod
@jit(nopython=True)
def get_var(x: np.ndarray) -> float:
"""
Getting the variance of time series array.
Args:
x: The univariate time series array in the form of 1d numpy array.
Returns:
Variance of the time series array.
"""
return np.var(x)
# spectral entropy
@staticmethod
@jit(forceobj=True)
def get_spectral_entropy(x: np.ndarray, freq: int = 1) -> float:
"""
Getting normalized Shannon entropy of power spectral density.
PSD is calculated using scipy's periodogram.
Args:
x: The univariate time series array in the form of 1d numpy array.
freq: int; Frequency for calculating the PSD via scipy periodogram.
Returns:
Normalized Shannon entropy.
"""
# calculate periodogram
_, psd = periodogram(x, freq)
# calculate shannon entropy of normalized psd
psd_norm = psd / np.sum(psd)
entropy = np.nansum(psd_norm * np.log2(psd_norm))
return -(entropy / np.log2(psd_norm.size))
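# Interpretation note: the returned value is normalized to roughly [0, 1]; values near 1
# indicate a flat, noise-like spectrum, while values near 0 indicate power concentrated
# at a few frequencies (a strongly periodic series).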
# lumpiness
@staticmethod
@jit(forceobj=True)
def get_lumpiness(x: np.ndarray, window_size: int = 20) -> float:
"""
Calculating the lumpiness of time series.
Lumpiness is defined as the variance of the chunk-wise variances.
Args:
x: The univariate time series array in the form of 1d numpy array.
window_size: int; Window size to split the data into chunks for getting
variances. Default value is 20.
Returns:
Lumpiness of the time series array.
"""
v = [np.var(x_w) for x_w in np.array_split(x, len(x) // window_size + 1)]
return np.var(v)
# stability
@staticmethod
@jit(forceobj=True)
def get_stability(x: np.ndarray, window_size: int = 20) -> float:
"""
Calculate the stability of time series.
Stability is defined as the variance of chunk-wise means.
Args:
x: The univariate time series array in the form of 1d numpy array.
window_size: int; Window size to split the data into chunks for getting
variances. Default value is 20.
Returns:
Stability of the time series array.
"""
v = [np.mean(x_w) for x_w in np.array_split(x, len(x) // window_size + 1)]
return np.var(v)
@staticmethod
# Numba bug on Python 3.7 only causes this to return only the first feature
# in dict_features. Disable until we drop 3.7 support.
# https://github.com/numba/numba/issues/7215
# @jit(forceobj=True)
def get_statistics(
x: np.ndarray,
dict_features: Optional[Dict[str, Callable[[np.ndarray], float]]] = None,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Calculate simple statistical features for a time series.
Args:
x: The univariate time series array in the form of 1d numpy array.
dict_features: A dictionary of partial methods to compute the features.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is
disabled.
default_status: Default status of the switch for calculate the
features or not.
Returns:
Many statistical features including entropy and crossing points.
"""
if extra_args is None:
extra_args = {}
if dict_features is None:
dict_features = {}
result = {}
for k, v in dict_features.items():
if extra_args.get(k, default_status):
result[k] = v(x)
return result
# STL decomposition based features
@staticmethod
@jit(forceobj=True)
def get_stl_features(
x: np.ndarray,
period: int = 7,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Calculate STL based features for a time series.
Args:
x: The univariate time series array in the form of 1d numpy array.
period: int; Period parameter for performing seasonality trend
decomposition using LOESS with statsmodels.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is
disabled.
default_status: Default status of the switch for calculate the
features or not.
Returns:
Seasonality features including strength of trend, seasonality,
spikiness, peak/trough.
"""
stl_features = {}
# STL decomposition
res = STL(x, period=period).fit()
# strength of trend
if extra_args is not None and extra_args.get("trend_strength", default_status):
stl_features["trend_strength"] = 1 - np.var(res.resid) / np.var(
res.trend + res.resid
)
# strength of seasonality
if extra_args is not None and extra_args.get(
"seasonality_strength", default_status
):
stl_features["seasonality_strength"] = 1 - np.var(res.resid) / np.var(
res.seasonal + res.resid
)
# spikiness: variance of the leave-one-out variances of the remainder component
if extra_args is not None and extra_args.get("spikiness", default_status):
resid_array = np.repeat(
np.array(res.resid)[:, np.newaxis], len(res.resid), axis=1
)
resid_array[np.diag_indices(len(resid_array))] = np.NaN
stl_features["spikiness"] = np.var(np.nanvar(resid_array, axis=0))
# location of peak
if extra_args is not None and extra_args.get("peak", default_status):
stl_features["peak"] = np.argmax(res.seasonal[:period])
# location of trough
if extra_args is not None and extra_args.get("trough", default_status):
stl_features["trough"] = np.argmin(res.seasonal[:period])
return stl_features
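# Interpretation note: trend_strength and seasonality_strength follow the
# 1 - Var(remainder) / Var(component + remainder) definition, so both lie roughly
# in [0, 1], with values near 1 indicating a strong trend or seasonal component.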
@staticmethod
@jit(forceobj=True)
@deprecated(version="0.2.0", reason="Renamed to get_level_shift_features")
def get_level_shift(
x: np.ndarray,
window_size: int = 20,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
return TsFeatures.get_level_shift_features(
x, window_size, extra_args, default_status
)
# Level shift
@staticmethod
@jit(forceobj=True)
def get_level_shift_features(
x: np.ndarray,
window_size: int = 20,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Calculate level shift features.
* level_shift_idx: Location of the maximum mean value difference,
between two consecutive sliding windows
* level_shift_size: Size of the maximum mean value difference,
between two consecutive sliding windows
Args:
x: The univariate time series array in the form of 1d numpy array.
window_size: int; Length of the sliding window.
extra_args: A dictionary containing information for disabling calculation
of a certain feature. If None, no feature is disabled.
default_status: Default status of the switch for calculate the features
or not.
Returns:
Level shift features including level_shift_idx, and level_shift_size
"""
level_shift_features = {"level_shift_idx": np.nan, "level_shift_size": np.nan}
if len(x) < window_size + 2:
msg = (
"Length of time series is shorter than window_size, unable to "
"calculate level shift features"
)
logging.error(msg)
return level_shift_features
sliding_idx = (np.arange(len(x))[None, :] + np.arange(window_size)[:, None])[
:, : len(x) - window_size + 1
]
means = np.mean(x[sliding_idx], axis=0)
mean_diff = np.abs(means[:-1] - means[1:])
if extra_args is not None and extra_args.get("level_shift_idx", default_status):
level_shift_features["level_shift_idx"] = np.argmax(mean_diff)
if extra_args is not None and extra_args.get(
"level_shift_size", default_status
):
level_shift_features["level_shift_size"] = mean_diff[np.argmax(mean_diff)]
return level_shift_features
# Flat spots
@staticmethod
@jit(forceobj=True)
def get_flat_spots(x: np.ndarray, nbins: int = 10) -> int:
"""
Getting flat spots: Maximum run-lengths across equally-sized segments of time series
Args:
x: The univariate time series array in the form of 1d numpy array.
nbins: int; Number of bins to segment time series data into.
Returns:
Maximum run-lengths across segmented time series array.
"""
if len(x) <= nbins:
msg = (
"Length of time series is shorter than nbins, unable to "
"calculate flat spots feature"
)
logging.error(msg)
return np.nan
max_run_length = 0
window_size = int(len(x) / nbins)
for i in range(0, len(x), window_size):
run_length = np.max(
[len(list(v)) for k, v in groupby(x[i : i + window_size])]
)
if run_length > max_run_length:
max_run_length = run_length
return max_run_length
# Hurst Exponent
@staticmethod
@jit(forceobj=True)
def get_hurst(x: np.ndarray, lag_size: int = 30) -> float:
"""
        Getting the Hurst Exponent. Wiki: https://en.wikipedia.org/wiki/Hurst_exponent
Args:
x: The univariate time series array in the form of 1d numpy array.
lag_size: int; Size for getting lagged time series data.
Returns:
The Hurst Exponent of the time series array
"""
# Create the range of lag values
lags = range(2, min(lag_size, len(x) - 1))
# Calculate the array of the variances of the lagged differences
tau = [np.std(np.asarray(x)[lag:] - np.asarray(x)[:-lag]) for lag in lags]
# Use a linear fit to estimate the Hurst Exponent
poly = np.polyfit(np.log(lags), np.log(tau), 1)
# Return the Hurst exponent from the polyfit output
return poly[0] if not np.isnan(poly[0]) else 0
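    # Illustrative sketch (assumed random-walk input): the standard deviation of
    # lag-k differences of a random walk grows roughly like k**0.5, so the fitted
    # log-log slope returned above should come out close to 0.5.
    #
    #   import numpy as np
    #   rw = np.cumsum(np.random.randn(1000))
    #   TsFeatures.get_hurst(rw)  # typically near 0.5 for a random walk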
# ACF and PACF features
# ACF features
@staticmethod
@jit(forceobj=True)
def get_acf_features(
extra_args: Dict[str, bool],
default_status: bool,
y_acf_list: List[float],
diff1y_acf_list: List[float],
diff2y_acf_list: List[float],
) -> Tuple[float, float, float, float, float, float, float]:
"""
Aggregating extracted ACF features from get_acfpacf_features function.
Args:
extra_args: A dictionary containing information for disabling calculation
of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
y_acf_list: List of ACF values acquired from original time series.
diff1y_acf_list: List of ACF values acquired from differenced time series.
diff2y_acf_list: List of ACF values acquired from twice differenced
time series.
Returns:
Auto-correlation function (ACF) features.
"""
y_acf1 = y_acf5 = diff1y_acf1 = diff1y_acf5 = diff2y_acf1 = np.nan
diff2y_acf5 = seas_acf1 = np.nan
# y_acf1: first ACF value of the original series
if extra_args.get("y_acf1", default_status):
y_acf1 = y_acf_list[0]
# y_acf5: sum of squares of first 5 ACF values of original series
if extra_args.get("y_acf5", default_status):
y_acf5 = np.sum(np.asarray(y_acf_list)[:5] ** 2)
# diff1y_acf1: first ACF value of the differenced series
if extra_args.get("diff1y_acf1", default_status):
diff1y_acf1 = diff1y_acf_list[0]
# diff1y_acf5: sum of squares of first 5 ACF values of differenced series
if extra_args.get("diff1y_acf5", default_status):
diff1y_acf5 = np.sum(np.asarray(diff1y_acf_list)[:5] ** 2)
# diff2y_acf1: first ACF value of the twice-differenced series
if extra_args.get("diff2y_acf1", default_status):
diff2y_acf1 = diff2y_acf_list[0]
# diff2y_acf5: sum of squares of first 5 ACF values of twice-differenced series
if extra_args.get("diff2y_acf5", default_status):
diff2y_acf5 = np.sum(np.asarray(diff2y_acf_list)[:5] ** 2)
# Autocorrelation coefficient at the first seasonal lag.
if extra_args.get("seas_acf1", default_status):
seas_acf1 = y_acf_list[-1]
return (
y_acf1,
y_acf5,
diff1y_acf1,
diff1y_acf5,
diff2y_acf1,
diff2y_acf5,
seas_acf1,
)
# PACF features
@staticmethod
@jit(forceobj=True)
def get_pacf_features(
extra_args: Dict[str, bool],
default_status: bool,
y_pacf_list: List[float],
diff1y_pacf_list: List[float],
diff2y_pacf_list: List[float],
) -> Tuple[float, float, float, float]:
"""
Aggregating extracted PACF features from get_acfpacf_features function.
Args:
extra_args: A dictionary containing information for disabling calculation
of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
y_pacf_list: List of PACF values acquired from original time series.
diff1y_pacf_list: List of PACF values acquired from differenced time series.
diff2y_pacf_list: List of PACF values acquired from twice differenced
time series.
Returns:
Partial auto-correlation function (PACF) features.
"""
y_pacf5 = diff1y_pacf5 = diff2y_pacf5 = seas_pacf1 = np.nan
# y_pacf5: sum of squares of first 5 PACF values of original series
if extra_args.get("y_pacf5", default_status):
y_pacf5 = np.nansum(np.asarray(y_pacf_list)[:5] ** 2)
# diff1y_pacf5: sum of squares of first 5 PACF values of differenced series
if extra_args.get("diff1y_pacf5", default_status):
diff1y_pacf5 = np.nansum(np.asarray(diff1y_pacf_list)[:5] ** 2)
# diff2y_pacf5: sum of squares of first 5 PACF values of twice-differenced series
if extra_args.get("diff2y_pacf5", default_status):
diff2y_pacf5 = np.nansum(np.asarray(diff2y_pacf_list)[:5] ** 2)
        # Partial autocorrelation coefficient at the first seasonal lag.
if extra_args.get("seas_pacf1", default_status):
seas_pacf1 = y_pacf_list[-1]
return (
y_pacf5,
diff1y_pacf5,
diff2y_pacf5,
seas_pacf1,
)
@staticmethod
@jit(forceobj=True)
def get_acfpacf_features(
x: np.ndarray,
acfpacf_lag: int = 6,
period: int = 7,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Calculate ACF and PACF based features. Calculate seasonal ACF, PACF based features.
Reference: https://stackoverflow.com/questions/36038927/whats-the-difference-between-pandas-acf-and-statsmodel-acf
R code: https://cran.r-project.org/web/packages/tsfeatures/vignettes/tsfeatures.html
Paper: Meta-learning how to forecast time series
Args:
x: The univariate time series array in the form of 1d numpy array.
acfpacf_lag: int; Largest lag number for returning ACF/PACF features
via statsmodels.
period: int; Seasonal period.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
Aggregated ACF, PACF features.
"""
acfpacf_features = {
"y_acf1": np.nan,
"y_acf5": np.nan,
"diff1y_acf1": np.nan,
"diff1y_acf5": np.nan,
"diff2y_acf1": np.nan,
"diff2y_acf5": np.nan,
"y_pacf5": np.nan,
"diff1y_pacf5": np.nan,
"diff2y_pacf5": np.nan,
"seas_acf1": np.nan,
"seas_pacf1": np.nan,
}
if len(x) < 10 or len(x) < period or len(np.unique(x)) == 1:
msg = (
"Length is shorter than period, or constant time series, "
"unable to calculate acf/pacf features"
)
logging.error(msg)
return acfpacf_features
nlag = min(acfpacf_lag, len(x) - 2)
diff1x = [x[i] - x[i - 1] for i in range(1, len(x))]
diff2x = [diff1x[i] - diff1x[i - 1] for i in range(1, len(diff1x))]
y_acf_list = acf(x, fft=True, nlags=period)[1:]
diff1y_acf_list = acf(diff1x, fft=True, nlags=nlag)[1:]
diff2y_acf_list = acf(diff2x, fft=True, nlags=nlag)[1:]
y_pacf_list = pacf(x, nlags=period)[1:]
diff1y_pacf_list = pacf(diff1x, nlags=nlag)[1:]
diff2y_pacf_list = pacf(diff2x, nlags=nlag)[1:]
# getting ACF features
(
acfpacf_features["y_acf1"],
acfpacf_features["y_acf5"],
acfpacf_features["diff1y_acf1"],
acfpacf_features["diff1y_acf5"],
acfpacf_features["diff2y_acf1"],
acfpacf_features["diff2y_acf5"],
acfpacf_features["seas_acf1"],
) = TsFeatures.get_acf_features(
extra_args,
default_status,
y_acf_list,
diff1y_acf_list,
diff2y_acf_list,
)
# getting PACF features
(
acfpacf_features["y_pacf5"],
acfpacf_features["diff1y_pacf5"],
acfpacf_features["diff2y_pacf5"],
acfpacf_features["seas_pacf1"],
) = TsFeatures.get_pacf_features(
extra_args,
default_status,
y_pacf_list,
diff1y_pacf_list,
diff2y_pacf_list,
)
return acfpacf_features
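    # Illustrative usage sketch (assumed weekly-style toy signal; extra_args={}
    # keeps every switch at its default):
    #
    #   import numpy as np
    #   x = np.sin(np.arange(100) * 2 * np.pi / 7) + 0.1 * np.random.randn(100)
    #   feats = TsFeatures.get_acfpacf_features(x, period=7, extra_args={})
    #   # feats["seas_acf1"] is the autocorrelation at lag 7 and should be strongly
    #   # positive here; y_acf5 / y_pacf5 are sums of squares of the first five
    #   # (P)ACF values.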
# standard deviation of the first derivative
@staticmethod
@jit(forceobj=True)
def get_std1st_der(x: np.ndarray) -> float:
"""
Calculate the standard deviation of the first derivative of the time series.
Reference: https://cran.r-project.org/web/packages/tsfeatures/vignettes/tsfeatures.html
Args:
x: The univariate time series array in the form of 1d numpy array.
Returns:
The standard deviation of the first derivative of the time series.
"""
return np.std(np.gradient(x))
# crossing points
@staticmethod
@jit(nopython=True)
def get_crossing_points(x: np.ndarray) -> float:
"""
Calculate the number of crossing points.
Crossing points happen when a time series crosses the median line.
Reference: https://cran.r-project.org/web/packages/tsfeatures/vignettes/tsfeatures.html
Args:
x: The univariate time series array in the form of 1d numpy array.
Returns:
The number of times a time series crosses the median line.
"""
median = np.median(x)
cp = 0
for i in range(len(x) - 1):
if x[i] <= median < x[i + 1] or x[i] >= median > x[i + 1]:
cp += 1
return cp
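    # Illustrative sketch (assumed toy input): a strictly alternating series crosses
    # its median between every pair of consecutive points.
    #
    #   import numpy as np
    #   TsFeatures.get_crossing_points(np.array([1.0, -1.0, 1.0, -1.0, 1.0, -1.0]))  # -> 5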
# binarize mean
@staticmethod
@jit(nopython=True)
def get_binarize_mean(x: np.ndarray) -> float:
"""
Converts time series array into a binarized version.
Time-series values above its mean are given 1, and those below the mean
are 0. Returns the average value of the binarized vector.
Reference: https://cran.r-project.org/web/packages/tsfeatures/vignettes/tsfeatures.html
Args:
x: The univariate time series array in the form of 1d numpy array.
Returns:
            The fraction of values above the mean, i.e. the mean of the binarized series.
"""
return np.mean(np.asarray(x) > np.mean(x))
# KPSS unit root test
@staticmethod
@jit(forceobj=True)
def get_unitroot_kpss(x: np.ndarray) -> float:
"""
Get the test statistic based on KPSS test.
        Tests the null hypothesis that an observable time series is stationary
        around a deterministic trend, and returns the statistic of the KPSS
        unit root test with a linear trend and lag one.
Wiki: https://en.wikipedia.org/wiki/KPSS_test
Args:
x: The univariate time series array in the form of 1d numpy array.
Returns:
Test statistics acquired using KPSS test.
"""
return kpss(x, regression="ct", nlags=1)[0]
# heterogeneity
@staticmethod
@jit(forceobj=True)
def get_het_arch(x: np.ndarray) -> float:
"""
        Compute Engle's test for autoregressive conditional heteroscedasticity (ARCH).
        reference: https://www.statsmodels.org/dev/generated/statsmodels.stats.diagnostic.het_arch.html
Args:
x: The univariate time series array in the form of 1d numpy array.
Returns:
Lagrange multiplier test statistic
"""
return het_arch(x, nlags=min(10, len(x) // 5))[0]
# histogram mode
@staticmethod
@jit(nopython=True)
def get_histogram_mode(x: np.ndarray, nbins: int = 10) -> float:
"""
Measures the mode of the data vector using histograms with a given number of bins.
Reference: https://cran.r-project.org/web/packages/tsfeatures/vignettes/tsfeatures.html
Args:
x: The univariate time series array in the form of 1d numpy array.
nbins: int; Number of bins to get the histograms. Default value is 10.
Returns:
Mode of the data vector using histograms.
"""
cnt, val = np.histogram(x, bins=nbins)
return val[cnt.argmax()]
# First min/zero AC (2)
@staticmethod
@jit(forceobj=True)
def get_special_ac(
x: np.ndarray,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Compute special_ac features.
        firstmin_ac: the lag of the first minimum of the autocorrelation function.
        firstzero_ac: the lag of the first zero crossing of the autocorrelation function.
Args:
x: The univariate time series array in the form of 1d numpy array.
extra_args: A dictionary containing information for disabling calculation
of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
Special autocorrelation features described above.
"""
# First min AC
special_ac_features = {"firstmin_ac": np.nan, "firstzero_ac": np.nan}
AC = acf(x, fft=True, nlags=len(x))[1:]
if extra_args is not None and extra_args.get("firstmin_ac", default_status):
i = 0
while i < len(AC) - 1:
if AC[i] > AC[i + 1]:
i += 1
else:
break
special_ac_features["firstmin_ac"] = i + 1
# First zero AC
if extra_args is not None and extra_args.get("firstzero_ac", default_status):
j = 0
while j < len(AC) - 1:
if AC[j] > 0 > AC[j + 1]:
break
else:
j += 1
special_ac_features["firstzero_ac"] = j + 2
return special_ac_features
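    # Illustrative sketch (assumed sinusoidal input): for a clean sinusoid of period
    # p the ACF is roughly cos(2*pi*lag/p), so the first minimum sits near p/2 and
    # the first zero crossing near p/4.
    #
    #   import numpy as np
    #   x = np.sin(np.arange(200) * 2 * np.pi / 20)
    #   feats = TsFeatures.get_special_ac(x, extra_args={})
    #   # feats["firstmin_ac"] ~ 10, feats["firstzero_ac"] ~ 5-6 for period 20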
# Linearity
@staticmethod
@jit(forceobj=True)
def get_linearity(x: np.ndarray) -> float:
"""
Compute linearity feature: R square from a fitted linear regression.
Args:
x: The univariate time series array in the form of 1d numpy array.
Returns:
R square from a fitted linear regression.
"""
_, _, r_value, _, _ = stats.linregress(np.arange(len(x)), x)
return r_value ** 2
# Holt Parameters (2)
@staticmethod
def get_holt_params(
x: np.ndarray,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Estimates the smoothing parameters for Holt's linear trend model.
* 'alpha': Level parameter of the Holt model.
        * 'beta': Trend parameter of the Holt model.
Args:
x: The univariate time series array in the form of 1d numpy array.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
Level and trend parameter of a fitted Holt model.
"""
holt_params_features = {"holt_alpha": np.nan, "holt_beta": np.nan}
try:
m = ExponentialSmoothing(x, trend="add", seasonal=None).fit()
if extra_args is not None and extra_args.get("holt_alpha", default_status):
holt_params_features["holt_alpha"] = m.params["smoothing_level"]
if extra_args is not None and extra_args.get("holt_beta", default_status):
holt_params_features["holt_beta"] = m.params["smoothing_trend"]
except Exception as e:
logging.warning(f"Holt Linear failed {e}")
return holt_params_features
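    # Illustrative usage sketch (assumed noisy linear trend; the try-block above
    # fits statsmodels' ExponentialSmoothing with an additive trend):
    #
    #   import numpy as np
    #   x = np.arange(100, dtype=float) + 0.1 * np.random.randn(100)
    #   feats = TsFeatures.get_holt_params(x, extra_args={})
    #   # feats["holt_alpha"] / feats["holt_beta"] are the fitted level and trend
    #   # smoothing parameters of the model.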
# Holt Winter’s Parameters (3)
@staticmethod
def get_hw_params(
x: np.ndarray,
period: int = 7,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
        Estimates the smoothing parameters of the Holt-Winters (HW) model.
Args:
x: The univariate time series array in the form of 1d numpy array.
            period: int; Seasonal period for fitting the exponential smoothing model.
extra_args: A dictionary containing information for disabling calculation
of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
Level, trend and seasonal parameter of a fitted Holt-Winter's model.
"""
hw_params_features = {"hw_alpha": np.nan, "hw_beta": np.nan, "hw_gamma": np.nan}
try:
m = ExponentialSmoothing(
x,
initialization_method="estimated",
seasonal="add",
seasonal_periods=period,
trend="add",
use_boxcox=True,
).fit()
if extra_args is not None:
if extra_args.get("hw_alpha", default_status):
hw_params_features["hw_alpha"] = m.params["smoothing_level"]
if extra_args.get("hw_beta", default_status):
hw_params_features["hw_beta"] = m.params["smoothing_trend"]
if extra_args.get("hw_gamma", default_status):
hw_params_features["hw_gamma"] = m.params["smoothing_seasonal"]
except Exception as e:
logging.warning(f"Holt-Winters failed {e}")
return hw_params_features
# CUSUM Detection Outputs (8)
@staticmethod
def get_cusum_detector(
ts: TimeSeriesData,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Extract features from the outputs of the Kats CUSUM Detector.
Args:
ts: The univariate time series array.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
Outputs of the CUSUM Detector, which include (1) Number of changepoints,
either 1 or 0, (2) Confidence of the changepoint detected, 0 if not
changepoint, (3) index, or position of the changepoint detected
within the time series, (4) delta of the mean levels before and after
the changepoint, (5) log-likelihood ratio of changepoint, (6)
Boolean - whether regression is detected by CUSUM, (7) Boolean -
whether changepoint is stable, (8) p-value of changepoint.
"""
cusum_detector_features = {
"cusum_num": np.nan,
"cusum_conf": np.nan,
"cusum_cp_index": np.nan,
"cusum_delta": np.nan,
"cusum_llr": np.nan,
"cusum_regression_detected": np.nan,
"cusum_stable_changepoint": np.nan,
"cusum_p_value": np.nan,
}
try:
cusum = cusum_detection.CUSUMDetector(ts)
cusum_cp = cusum.detector()
cp = None if len(cusum_cp) == 0 else cusum_cp[0]
if extra_args is not None and extra_args.get("cusum_num", default_status):
cusum_detector_features["cusum_num"] = len(cusum_cp)
if extra_args is not None and extra_args.get("cusum_conf", default_status):
cusum_detector_features["cusum_conf"] = (
0 if cp is None else cp.confidence
)
if extra_args is not None and extra_args.get(
"cusum_cp_index", default_status
):
cusum_detector_features["cusum_cp_index"] = (
0 if cp is None else cp.cp_index / len(ts)
)
if extra_args is not None and extra_args.get("cusum_delta", default_status):
cusum_detector_features["cusum_delta"] = 0 if cp is None else cp.delta
if extra_args is not None and extra_args.get("cusum_llr", default_status):
cusum_detector_features["cusum_llr"] = 0 if cp is None else cp.llr
if extra_args is not None and extra_args.get(
"cusum_regression_detected", default_status
):
cusum_detector_features["cusum_regression_detected"] = (
False if cp is None else cp.regression_detected
)
if extra_args is not None and extra_args.get(
"cusum_stable_changepoint", default_status
):
cusum_detector_features["cusum_stable_changepoint"] = (
False if cp is None else cp.stable_changepoint
)
if extra_args is not None and extra_args.get(
"cusum_p_value", default_status
):
cusum_detector_features["cusum_p_value"] = (
0 if cp is None else cp.p_value
)
except Exception as e:
logging.warning(f"Cusum Detector failed {e}")
return cusum_detector_features
# Robust Stat Detection Outputs (2)
@staticmethod
def get_robust_stat_detector(
ts: TimeSeriesData,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Extract features from the outputs of the Kats Robust Stat Detector.
Args:
ts: The univariate time series array.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
            (1) Number of changepoints detected by the Robust Stat Detector, and
(2) Mean of the Metric values from the Robust Stat Detector.
"""
robust_stat_detector_features = {
"robust_num": np.nan,
"robust_metric_mean": np.nan,
}
try:
robust = robust_stat_detection.RobustStatDetector(ts)
robust_cp = robust.detector()
if extra_args is not None and extra_args.get("robust_num", default_status):
robust_stat_detector_features["robust_num"] = len(robust_cp)
if extra_args is not None and extra_args.get(
"robust_metric_mean", default_status
):
ncp = len(robust_cp)
if ncp == 0:
robust_stat_detector_features["robust_metric_mean"] = np.nan
else:
metric = 0.0
for cp in robust_cp:
metric += cp.metric
robust_stat_detector_features["robust_metric_mean"] = metric / ncp
except Exception as e:
logging.warning(f"Robust Stat Detector failed {e}")
return robust_stat_detector_features
# BOCP Detection Outputs (3)
@staticmethod
def get_bocp_detector(
ts: TimeSeriesData,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Extract features from the output of the Kats BOCP Detector.
Args:
ts: The univariate time series.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
(tuple): tuple containing:
Number of changepoints detected by BOCP Detector
Max value of the confidence of the changepoints detected
Mean value of the confidence of the changepoints detected.
"""
bocp_detector_features = {
"bocp_num": np.nan,
"bocp_conf_max": np.nan,
"bocp_conf_mean": np.nan,
}
try:
bocp = bocpd.BOCPDetector(ts)
bocp_cp = bocp.detector(choose_priors=False)
if extra_args is not None and extra_args.get("bocp_num", default_status):
bocp_detector_features["bocp_num"] = len(bocp_cp)
if extra_args is not None and extra_args.get(
"bocp_conf_max", default_status
):
bocp_detector_features["bocp_conf_max"] = (
0
if len(bocp_cp) == 0
else np.max([cp.confidence for cp in bocp_cp])
)
if extra_args is not None and extra_args.get(
"bocp_conf_mean", default_status
):
bocp_detector_features["bocp_conf_mean"] = (
0
if len(bocp_cp) == 0
else np.mean([cp.confidence for cp in bocp_cp])
)
except Exception as e:
logging.warning(f"BOCPDetector failed {e}")
return bocp_detector_features
# Outlier Detection Outputs (1)
@staticmethod
def get_outlier_detector(
ts: TimeSeriesData,
decomp: str = "additive",
iqr_mult: float = 3.0,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Extract features from the output of the Kats Outlier Detector.
Args:
ts: The univariate time series.
decomp: str; Additive or Multiplicative mode for performing outlier
detection using OutlierDetector.
iqr_mult: float; IQR range for determining outliers through
OutlierDetector.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
Number of outliers by the Outlier Detector.
"""
outlier_detector_features = {"outlier_num": np.nan}
try:
odetector = outlier.OutlierDetector(ts, decomp=decomp, iqr_mult=iqr_mult)
odetector.detector()
if extra_args is not None and extra_args.get("outlier_num", default_status):
outliers = odetector.outliers
assert outliers is not None and len(outliers) > 0
outlier_detector_features["outlier_num"] = len(outliers[0])
except Exception as e:
logging.warning(f"OutlierDetector failed {e}")
return outlier_detector_features
# Trend Detection Outputs (3)
@staticmethod
def get_trend_detector(
ts: TimeSeriesData,
threshold: float = 0.8,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Extract features from the output of the Kats Trend Detector.
Args:
ts: The univariate time series.
threshold: float; threshold for trend intensity; higher threshold
gives trend with high intensity (0.8 by default). If we only
want to use the p-value to determine changepoints, set threshold = 0.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
(1) Number of trends detected by the Kats Trend Detector, (2) Number
            of increasing trends, (3) Mean of the absolute values of Taus of the
trends detected.
"""
trend_detector_features = {
"trend_num": np.nan,
"trend_num_increasing": np.nan,
"trend_avg_abs_tau": np.nan,
}
try:
tdetector = trend_mk.MKDetector(data=ts, threshold=threshold)
tdetected_time_points = tdetector.detector(direction="both")
if extra_args is not None and extra_args.get("trend_num", default_status):
trend_detector_features["trend_num"] = len(tdetected_time_points)
if extra_args is not None and extra_args.get(
"trend_num_increasing", default_status
):
                num_increasing = 0
                for p in tdetected_time_points:
                    if p.trend_direction == "increasing":
                        num_increasing += 1
                trend_detector_features["trend_num_increasing"] = num_increasing
if extra_args is not None and extra_args.get(
"trend_avg_abs_tau", default_status
):
npoints = len(tdetected_time_points)
if npoints == 0:
trend_detector_features["trend_avg_abs_tau"] = 0
else:
tau = 0.0
for p in tdetected_time_points:
if isinstance(p.Tau, float):
tau += abs(p.Tau)
trend_detector_features["trend_avg_abs_tau"] = tau / npoints
except Exception as e:
logging.warning(f"Trend Detector failed {e}")
return trend_detector_features
@staticmethod
@jit(nopython=True)
def _ewma(arr: np.ndarray, span: int, min_periods: int) -> np.ndarray:
"""
        Exponentially weighted moving average specified by a decay ``span``,
        adjusted to provide better estimates for small windows via:
y[t] = (x[t] + (1-a)*x[t-1] + (1-a)^2*x[t-2] + ... + (1-a)^n*x[t-n]) /
(1 + (1-a) + (1-a)^2 + ... + (1-a)^n).
Args:
            arr : np.ndarray; A one-dimensional numpy array.
span : int; The decay window, or 'span'.
min_periods: int; Minimum amount of data points we'd like to include
in the output.
Returns:
A np.ndarray. The exponentially weighted moving average of the array.
"""
output_array = np.empty(arr.shape[0], dtype=np.float64)
output_array[:] = np.NaN
arr = arr[~np.isnan(arr)]
n = arr.shape[0]
ewma = np.empty(n, dtype=np.float64)
alpha = 2 / float(span + 1)
w = 1
ewma_old = arr[0]
ewma[0] = ewma_old
for i in range(1, n):
w += (1 - alpha) ** i
ewma_old = ewma_old * (1 - alpha) + arr[i]
ewma[i] = ewma_old / w
output_subset = ewma[(min_periods - 1) :]
output_array[-len(output_subset) :] = output_subset
return output_array
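    # Illustrative numeric sketch (assumed toy input): with span=3 (alpha=0.5) and
    # min_periods=1 the weights halve at each step, e.g. for [1, 2, 3]:
    #   ewma[2] = (3 + 0.5*2 + 0.25*1) / (1 + 0.5 + 0.25) = 4.25 / 1.75 ~ 2.43
    #
    #   import numpy as np
    #   TsFeatures._ewma(np.array([1.0, 2.0, 3.0]), 3, 1)
    #   # -> array([1.0, 1.6667, 2.4286]) approximately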
@staticmethod
@jit(forceobj=True)
def _get_nowcasting_np(
x: np.ndarray,
window: int = 5,
n_fast: int = 12,
n_slow: int = 21,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Sequence[float]:
"""
Perform feature engineering using the same procedure as nowcasting.
Args:
x: The univariate time series array in the form of 1d numpy array.
window: int; Length of window size for all Nowcasting features.
n_fast: int; length of "fast" or short period exponential moving
average in the MACD algorithm in the nowcasting features.
n_slow: int; length of "slow" or long period exponential moving
average in the MACD algorithm in the nowcasting features.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
A list containing extracted nowcast features.
"""
# initializing the outputs
nowcasting_features = [np.nan for _ in range(7)]
        # ROC: rate of change, i.e. the return relative to the value (window - 1) steps back.
if extra_args is not None and extra_args.get("nowcast_roc", default_status):
M = x[(window - 1) :] - x[: -(window - 1)]
N = x[: -(window - 1)]
arr = M / N
nowcasting_features[0] = np.nan_to_num(
arr, nan=0.0, posinf=0.0, neginf=0.0
).mean()
        # MOM: momentum, i.e. the difference between the current value and the value `window` steps back.
if extra_args is not None and extra_args.get("nowcast_mom", default_status):
M = x[window:] - x[:-window]
nowcasting_features[1] = np.nan_to_num(
M, nan=0.0, posinf=0.0, neginf=0.0
).mean()
        # MA: moving average over the past `window` steps.
if extra_args is not None and extra_args.get("nowcast_ma", default_status):
ret = np.cumsum(x, dtype=float)
ret[window:] = ret[window:] - ret[:-window]
ma = ret[window - 1 :] / window
nowcasting_features[2] = np.nan_to_num(
ma, nan=0.0, posinf=0.0, neginf=0.0
).mean()
        # LAG: the series lagged by `window` steps.
if extra_args is not None and extra_args.get("nowcast_lag", default_status):
N = x[:-window]
nowcasting_features[3] = np.nan_to_num(
N, nan=0.0, posinf=0.0, neginf=0.0
).mean()
# MACD: https://www.investopedia.com/terms/m/macd.asp.
ema_fast = TsFeatures._ewma(x, n_fast, n_slow - 1)
ema_slow = TsFeatures._ewma(x, n_slow, n_slow - 1)
MACD = ema_fast - ema_slow
if extra_args is not None and extra_args.get("nowcast_macd", default_status):
nowcasting_features[4] = np.nan_to_num(
np.nanmean(MACD), nan=0.0, posinf=0.0, neginf=0.0
)
if len(x) >= 27:
MACDsign = TsFeatures._ewma(MACD, 9, 8)
if extra_args is not None and extra_args.get(
"nowcast_macdsign", default_status
):
nowcasting_features[5] = np.nan_to_num(
np.nanmean(MACDsign), nan=0.0, posinf=0.0, neginf=0.0
)
MACDdiff = MACD - MACDsign
if extra_args is not None and extra_args.get(
"nowcast_macddiff", default_status
):
nowcasting_features[6] = np.nan_to_num(
np.nanmean(MACDdiff), nan=0.0, posinf=0.0, neginf=0.0
)
return nowcasting_features
# Nowcasting features (7)
@staticmethod
def get_nowcasting(
x: np.ndarray,
window: int = 5,
n_fast: int = 12,
n_slow: int = 21,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Extract aggregated features from the output of the Kats nowcasting transformer.
Args:
x: The univariate time series array in the form of 1d numpy array.
window: int; Length of window size for all Nowcasting features.
n_fast: int; length of "fast" or short period exponential moving
average in the MACD algorithm in the nowcasting features.
n_slow: int; length of "slow" or long period exponential moving
average in the MACD algorithm in the nowcasting features.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. Default value is None, i.e. no
feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
Mean values of the Kats Nowcasting algorithm time series outputs
using the parameters
(window, n_fast, n_slow) indicated above. These outputs include:
            (1) mean of the Rate of Change (ROC) series, (2) mean of the
            Momentum (MOM) series, (3) mean of the Moving Average (MA) series,
            (4) mean of the LAG series, and (5) means of the MACD, MACDsign,
            and MACDdiff series from Kats Nowcasting.
"""
nowcasting_features = {}
features = [
"nowcast_roc",
"nowcast_mom",
"nowcast_ma",
"nowcast_lag",
"nowcast_macd",
"nowcast_macdsign",
"nowcast_macddiff",
]
for feature in features:
if extra_args is not None and extra_args.get(feature, default_status):
nowcasting_features[feature] = np.nan
try:
_features = TsFeatures._get_nowcasting_np(
x, window, n_fast, n_slow, extra_args, default_status
)
for idx, feature in enumerate(features):
if extra_args is not None and extra_args.get(feature, default_status):
nowcasting_features[feature] = _features[idx]
except Exception as e:
logging.warning(f"Nowcasting failed {e}")
if len(x) < 27:
logging.warning(
f"MACDsign couldn't get computed successfully due to insufficient time series length: {len(x)}"
)
return nowcasting_features
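    # Illustrative usage sketch (assumed random-walk input): the wrapper returns
    # the mean of each nowcasting series; MACDsign/MACDdiff need at least 27 points
    # with the default spans.
    #
    #   import numpy as np
    #   x = 100 + np.cumsum(np.random.randn(200))
    #   feats = TsFeatures.get_nowcasting(x, extra_args={})
    #   # keys: nowcast_roc, nowcast_mom, nowcast_ma, nowcast_lag,
    #   #       nowcast_macd, nowcast_macdsign, nowcast_macddiff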
# seasonality features (4)
@staticmethod
def get_seasonalities(
ts: TimeSeriesData,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
        Extract features from the output of the Kats seasonality detectors.
Args:
ts: The univariate time series.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
            (1) The detected seasonal period, (2) the trend magnitude (slope of a
            simple linear regression fitted on the trend component), (3) the
            seasonality magnitude (difference between the 95th and 5th percentiles
            of the seasonal component), and (4) the standard deviation of the
            residual component.
"""
seasonality_features = {
"seasonal_period": np.nan,
"trend_mag": np.nan,
"seasonality_mag": np.nan,
"residual_std": np.nan,
}
try:
# detrending for period estimation
detrended = TimeSeriesData(
pd.DataFrame(
{
"time": len(ts.value.values) - 1,
"value": ts.value.values[1:] - ts.value.values[:-1],
}
)
)
detected = seasonality.FFTDetector(detrended).detector()
if detected["seasonality_presence"]:
_period = int(np.min(detected["seasonalities"]))
else:
_period = 7
res = STL(ts.value.values, period=_period).fit()
if extra_args is not None and extra_args.get(
"seasonal_period", default_status
):
seasonality_features["seasonal_period"] = _period
# getting seasonality magnitude
if extra_args is not None and extra_args.get(
"seasonality_mag", default_status
):
seasonality_features["seasonality_mag"] = np.round(
np.quantile(res.seasonal, 0.95) - np.quantile(res.seasonal, 0.05)
)
# fitting linear regression for trend magnitude
if extra_args is not None and extra_args.get("trend_mag", default_status):
exog = res.trend
_series = exog - exog[0]
mod = sm.OLS(_series, np.arange(len(_series)))
_res = mod.fit()
# trend magnitude
seasonality_features["trend_mag"] = _res.params[0]
# residual standard deviation
if extra_args is not None and extra_args.get(
"residual_std", default_status
):
seasonality_features["residual_std"] = np.std(res.resid)
except Exception as e:
logging.warning(f"Seasonality failed {e}")
return seasonality_features
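    # Illustrative usage sketch (assumed weekly toy series, built as a Kats
    # TimeSeriesData the same way as the detrended series above):
    #
    #   import numpy as np, pandas as pd
    #   t = pd.date_range("2020-01-01", periods=180, freq="D")
    #   v = 10 * np.sin(np.arange(180) * 2 * np.pi / 7) + 0.1 * np.arange(180)
    #   feats = TsFeatures.get_seasonalities(
    #       TimeSeriesData(pd.DataFrame({"time": t, "value": v})), extra_args={}
    #   )
    #   # feats["seasonal_period"] should come out near 7 for this weekly pattern.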
# time features (16)
@staticmethod
def get_time( # noqa C901
ts: TimeSeriesData,
extra_args: Optional[Dict[str, bool]] = None,
default_status: bool = True,
) -> Dict[str, float]:
"""
Extract features from the time values of the time series.
Args:
ts: The time series.
extra_args: A dictionary containing information for disabling
calculation of a certain feature. If None, no feature is disabled.
            default_status: Default status of the switch for calculating the features.
Returns:
The count of years, months, weeks, days, distinct months of year,
distinct weeks of year, distinct days of year, frequency of each day
of the week, and average timezone UTC offset in the time series.
"""
time_features = {
"time_years": np.nan,
"time_months": np.nan,
"time_monthsofyear": np.nan,
"time_weeks": np.nan,
"time_weeksofyear": np.nan,
"time_days": np.nan,
"time_daysofyear": np.nan,
"time_avg_timezone_offset": np.nan,
"time_length_days": np.nan,
"time_freq_Monday": np.nan,
"time_freq_Tuesday": np.nan,
"time_freq_Wednesday": np.nan,
"time_freq_Thursday": np.nan,
"time_freq_Friday": np.nan,
"time_freq_Saturday": np.nan,
"time_freq_Sunday": np.nan,
}
try:
n = len(ts)
index = ts.time_to_index()
dow = index.dayofweek
if extra_args is not None and extra_args.get("time_years", default_status):
time_features["time_years"] = index.year.nunique()
if extra_args is not None and extra_args.get("time_months", default_status):
time_features["time_months"] = index.strftime("%Y-%m").nunique()
if extra_args is not None and extra_args.get(
"time_monthsofyear", default_status
):
time_features["time_monthsofyear"] = index.month.nunique()
if extra_args is not None and extra_args.get("time_weeks", default_status):
time_features["time_weeks"] = index.strftime("%G-%V").nunique()
if extra_args is not None and extra_args.get(
"time_weeksofyear", default_status
):
time_features["time_weeksofyear"] = index.weekofyear.nunique()
if extra_args is not None and extra_args.get("time_days", default_status):
time_features["time_days"] = index.strftime("%Y-%d").nunique()
if extra_args is not None and extra_args.get(
"time_daysofyear", default_status
):
time_features["time_daysofyear"] = index.dayofyear.nunique()
if extra_args is not None and extra_args.get(
"time_avg_timezone_offset", default_status
):
try:
utcoffsets = [dt.utcoffset().total_seconds() for dt in index]
time_features["time_avg_timezone_offset"] = np.array(
utcoffsets
).mean()
except AttributeError:
time_features["time_avg_timezone_offset"] = 0.0
if extra_args is not None and extra_args.get(
"time_length_days", default_status
):
time_features["time_length_days"] = (index.max() - index.min()).days
if extra_args is not None and extra_args.get(
"time_freq_Monday", default_status
):
time_features["time_freq_Monday"] = (dow == 0).sum() / n
if extra_args is not None and extra_args.get(
"time_freq_Tuesday", default_status
):
time_features["time_freq_Tuesday"] = (dow == 1).sum() / n
if extra_args is not None and extra_args.get(
"time_freq_Wednesday", default_status
):
time_features["time_freq_Wednesday"] = (dow == 2).sum() / n
if extra_args is not None and extra_args.get(
"time_freq_Thursday", default_status
):
time_features["time_freq_Thursday"] = (dow == 3).sum() / n
if extra_args is not None and extra_args.get(
"time_freq_Friday", default_status
):
time_features["time_freq_Friday"] = (dow == 4).sum() / n
if extra_args is not None and extra_args.get(
"time_freq_Saturday", default_status
):
time_features["time_freq_Saturday"] = (dow == 5).sum() / n
if extra_args is not None and extra_args.get(
"time_freq_Sunday", default_status
):
time_features["time_freq_Sunday"] = (dow == 6).sum() / n
except Exception as e:
logging.warning(f"Time failed {e}")
return time_features
|
the-stack_106_29416 | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from extensions.ops.ctc_greedy_decoder import CTCGreedyDecoderOp
from mo.front.caffe.collect_attributes import merge_attrs
from mo.front.common.extractors.utils import layout_attrs
from mo.front.extractor import FrontExtractorOp
class CTCGreedyDecoderFrontExtractor(FrontExtractorOp):
op = 'CTCGreedyDecoder'
enabled = True
@classmethod
def extract(cls, node):
proto_layer = node.pb
param = proto_layer.ctc_decoder_param
update_attrs = {
            'ctc_merge_repeated': int(param.ctc_merge_repeated)
}
mapping_rule = merge_attrs(param, update_attrs)
mapping_rule.update(layout_attrs())
# update the attributes of the node
CTCGreedyDecoderOp.update_node_stat(node, mapping_rule)
return cls.enabled
|
the-stack_106_29417 | class Solution(object):
    def twoSum(self, nums, target):
        """Return indices of two numbers in nums that add up to target."""
        # Map each value to its (last) index for O(1) complement lookups.
        numbers = dict()
        for key, value in enumerate(nums):
            numbers[value] = key
        solution = list()
        for index in range(len(nums)):
            value = nums[index]
            total = target - value
            temp = numbers.get(total, None)
            # The complement must exist and sit at a different position.
            if temp is not None and temp != index:
                solution = list([index, temp])
        return solution
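    # Example (illustrative): the scan keeps the last matching pair it finds, so
    # Solution().twoSum([2, 7, 11, 15], 9) returns [1, 0] (the indices of 7 and 2).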
|
the-stack_106_29418 | """A script converting old ontologies to new ones.
Old ontology typically have entity names in ALL_UPPERCASE.
Newer ontology usually have entity names in CamelCase.
"""
import argparse
import yaml
import logging
import re
import shutil
import os
from copy import deepcopy
from pathlib import Path
from osp.core.ontology.parser.yml.keywords import NAMESPACE_KEY, ONTOLOGY_KEY,\
SUPERCLASSES_KEY, REQUIREMENTS_KEY
entity_name_regex = r"(_|[A-Z])([A-Z]|[0-9]|_)*"
entity_name_pattern = re.compile(r"^%s$" % entity_name_regex)
qualified_entity_name_pattern = re.compile(
r"^%s\.%s$" % tuple([entity_name_regex] * 2)
)
logger = logging.getLogger(__name__)
class Yaml2CamelCaseConverter():
"""Tool that transforms entity names of YAML ontologies.
    Input: YAML with entity names in ALL_CAPS.
Output: YAML ontology in CamelCase.
"""
def __init__(self, file_path):
"""Initialize the converter.
Args:
file_path (path): Path to the yaml file to convert
"""
self.file_path = file_path
with open(file_path, 'r') as file:
self.doc = yaml.safe_load(file)
self.onto_doc = self.doc[ONTOLOGY_KEY]
self.orig_onto_doc = deepcopy(self.onto_doc)
self.namespace = self.doc[NAMESPACE_KEY].lower()
self.ambiguity_resolution = dict()
def convert(self):
"""Convert the yaml file to CamelCase."""
self.doc[NAMESPACE_KEY] = self.namespace
if REQUIREMENTS_KEY in self.doc:
self.doc[REQUIREMENTS_KEY] = [x.lower()
for x in self.doc[REQUIREMENTS_KEY]]
self.convert_nested_doc(self.onto_doc, pattern=entity_name_pattern)
def convert_nested_doc(self, doc, pattern=qualified_entity_name_pattern):
"""Convert the document to CamelCase.
Args:
doc (Any): The document to convert
pattern (re.Pattern, optional): The pattern for the entities.
Defaults to qualified_entity_name_pattern.
"""
if isinstance(doc, list):
new = list()
for elem in doc:
if elem == "CUBA.ENTITY":
new.append("cuba.Entity")
elif isinstance(elem, str) and pattern.match(elem):
new.append(self.toCamelCase(elem))
else:
new.append(elem)
self.convert_nested_doc(elem)
doc.clear()
doc.extend(new)
if isinstance(doc, dict):
new = dict()
for key, value in doc.items():
if isinstance(key, str) and pattern.match(key):
new[self.toCamelCase(key)] = value
self.convert_nested_doc(value)
elif value == "CUBA.ENTITY":
new[key] = "cuba.Entity"
elif isinstance(value, str) and pattern.match(value):
new[key] = self.toCamelCase(value)
else:
new[key] = value
self.convert_nested_doc(value)
doc.clear()
doc.update(new)
def store(self, output):
"""Update the ontology on disc."""
if not output:
output = self.file_path
if output == self.file_path:
logger.info(f"Backing up original file at {output}.orig")
orig_path = f"{output}.orig"
orig_counter = 0
while os.path.exists(orig_path):
orig_path = f"{output}.orig[{orig_counter}]"
orig_counter += 1
shutil.copyfile(str(output), orig_path)
logger.info(f"Writing camel case file to {output}")
with open(output, "w") as f:
yaml.safe_dump(self.doc, f)
def get_first_letter_caps(self, word, internal=False):
"""Check whether a entity name should start with lower or uppercase.
Args:
word (str): The entity name to check
internal (bool, optional): True iff this method has been
called recursively. Defaults to False.
Raises:
ValueError: Undefined entity name
Returns:
Optional[bool]: True iff word should start with uppercase.
Will always return a bool if internal is False.
"""
# cuba cases
if word in ["CUBA.RELATIONSHIP", "CUBA.ACTIVE_RELATIONSHIP",
"CUBA.PASSIVE_RELATIONSHIP", "CUBA.ATTRIBUTE",
"CUBA.PATH"]:
return False
if word in ["CUBA.WRAPPER", "CUBA.NOTHING", "CUBA.FILE"]:
return True
if word == "CUBA.ENTITY":
return None if internal else True
# qualified cases
if "." in word:
namespace, name = word.split(".")
if namespace.lower() == self.namespace:
x = self.get_first_letter_caps(name, internal=True)
return True if x is None and not internal else x
if word in self.ambiguity_resolution:
return self.ambiguity_resolution[word]
ar = input(f"Is {word} an ontology class (y/n)? ") \
.lower().strip().startswith("y")
self.ambiguity_resolution[word] = ar
return ar
# unqualified cases
if word not in self.orig_onto_doc:
raise ValueError(f"Undefined entity {word}")
subclasses = self.orig_onto_doc[word][SUPERCLASSES_KEY]
if any(isinstance(subclass, dict) for subclass in subclasses):
return True
if any(self.get_first_letter_caps(subclass, True) is False
for subclass in subclasses):
return False
if any(self.get_first_letter_caps(subclass, True) is True
for subclass in subclasses):
return True
return None if internal else True
def toCamelCase(self, word):
"""Convert the given entity name to camel case.
Args:
word (str): The word to convert to CamelCase
Returns:
str: The entity name in CamelCase
"""
first_letter_caps = self.get_first_letter_caps(word)
result = ""
if "." in word:
result += word.split(".")[0].lower() + "."
word = word.split(".")[1]
result += word[0].upper() if first_letter_caps else word[0].lower()
next_upper = False
for c in word[1:]:
if c == "_":
next_upper = True
elif next_upper:
result += c.upper()
next_upper = False
else:
result += c.lower()
return result
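    # Illustrative sketch of the conversion rule (using the special CUBA cases
    # handled in get_first_letter_caps, so no parsed YAML document is needed):
    #
    #   converter.toCamelCase("CUBA.ACTIVE_RELATIONSHIP")  # -> "cuba.activeRelationship"
    #   converter.toCamelCase("CUBA.WRAPPER")              # -> "cuba.Wrapper"
    #
    # where `converter` is a Yaml2CamelCaseConverter built from a YAML file;
    # relationship-like entities start lowercase and ontology classes uppercase.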
def run_from_terminal():
"""Run yaml2camelcase from the terminal."""
# Parse the user arguments
parser = argparse.ArgumentParser(
description="Convert a YAML ontology with ALL_CAPS entity names to a "
"YAML ontology using CamelCase"
)
parser.add_argument("input", type=Path, help="The input yaml file.")
parser.add_argument("-o", "--output", type=Path, required=False,
default=None, help="The input yaml file.")
args = parser.parse_args()
c = Yaml2CamelCaseConverter(args.input)
c.convert()
c.store(args.output)
if __name__ == "__main__":
run_from_terminal()
|
the-stack_106_29419 | import torch as t
import torch.nn as nn
import numpy as np
from typing import Union, Iterable
from torch.utils.data import IterableDataset
from pgportfolio.marketdata.coin_data_manager import \
coin_data_manager_init_helper
from pgportfolio.utils.misc import get_feature_list, parse_time
class PGPDataset(IterableDataset):
"""
A wrapper over PGPBuffer.next_batch, so it can be used by pytorch
Dataloader.
"""
def __init__(self, buffer: 'PGPBuffer', source: str) -> None:
self.buffer = buffer
self.source = source
def __iter__(self) -> Iterable:
return self
def __next__(self):
return self.buffer.next_batch(self.source)
class PGPBuffer(nn.Module):
def __init__(self,
coin_features: np.ndarray,
batch_size=50,
window_size=50,
test_portion=0.15,
validation_portion=0.1,
sample_bias=0.1,
portion_reversed=False,
device="cpu"):
"""
Args:
coin_features: Coin features in shape [feature, coin, time].
window_size: Periods of input data
test_portion: Portion of testing set, training portion is
`1 - test_portion-validation_portion`.
validation_portion: Portion of validation set.
portion_reversed: If False, the order of sets is (train, test)
else the order is (test, train).
device: Pytorch device to store information on.
"""
super(PGPBuffer, self).__init__()
assert coin_features.ndim == 3
coin_num = coin_features.shape[1]
period_num = coin_features.shape[2]
coin_features = t.tensor(coin_features, device=device)
# portfolio vector memory
pvm = t.full([period_num, coin_num], 1.0 / coin_num, device=device)
self.register_buffer("_coin_features", coin_features, True)
self.register_buffer("_pvm", pvm, True)
self._batch_size = batch_size
self._window_size = window_size
self._sample_bias = sample_bias
self._portion_reversed = portion_reversed
self._train_idx, self._test_idx, self._val_idx = \
self._divide_data(period_num, window_size, test_portion,
validation_portion, portion_reversed)
# the count of appended experiences
self._new_exp_count = 0
@property
def train_num(self):
return len(self._train_idx)
@property
def test_num(self):
return len(self._test_idx)
@property
def val_num(self):
return len(self._val_idx)
def get_train_set(self):
"""
Returns:
All samples from the train set.
"""
return self._pack_samples(self._train_idx)
def get_test_set(self):
"""
Returns:
All samples from the test set.
"""
return self._pack_samples(self._test_idx)
def get_val_set(self):
"""
Returns:
All samples from the validation set.
"""
return self._pack_samples(self._val_idx)
def get_train_dataset(self):
return PGPDataset(self, "train")
def get_test_dataset(self):
return PGPDataset(self, "test")
def get_val_dataset(self):
return PGPDataset(self, "val")
def append_experience(self,
coin_features: np.ndarray,
pvm: Union[t.tensor, None] = None):
"""
Used in online training. Append new experience and coin features
to the current buffer.
Args:
coin_features: New coin features following the current features,
shape is [feature, coin, time].
pvm: New pvm weights, shape is [time, coin], let it be
None if in the back-test case.
"""
if not self._portion_reversed:
raise RuntimeError("Cannot append experience to training set "
"when portions of data are not in"
"the reverse order.")
self._new_exp_count += coin_features.shape[-1]
self._train_idx += list(range(
self._train_idx[-1], self._train_idx[-1] + coin_features.shape[-1]
))
device = self._coin_features.device
        self._coin_features = t.cat(
            [self._coin_features, t.tensor(coin_features, device=device)],
            dim=-1,  # concatenate along the time dimension
        )
self._pvm = t.cat([self._pvm, pvm.to(device)])
def next_batch(self, source="train"):
"""
Returns:
The next batch of training sample, the batch is contiguous in time.
The sample is a dictionary with keys:
"X": input data [batch, feature, coin, time];
"y": future relative price [batch, norm_feature, coin];
"last_w:" a numpy array with shape [batch_size, assets];
"setw": a callback function used to update the PVM memory.
"""
if source == "train":
start_idx = self._train_idx[0]
end_idx = self._train_idx[-1]
elif source == "test":
start_idx = self._test_idx[0]
end_idx = self._test_idx[-1]
elif source == "val":
start_idx = self._val_idx[0]
end_idx = self._val_idx[-1]
else:
raise ValueError("Unknown source")
batch_start = self._sample_geometric(
start_idx, end_idx, self._sample_bias
)
batch_idx = list(range(batch_start, batch_start + self._batch_size))
batch = self._pack_samples(batch_idx)
return batch
def _pack_samples(self, index):
index = np.array(index)
last_w = self._pvm[index - 1, :]
def setw(w):
assert t.is_tensor(w)
self._pvm[index, :] = w.to(self._pvm.device).detach()
batch = t.stack([
self._coin_features[:, :, idx:idx + self._window_size + 1]
for idx in index
])
# features, [batch, feature, coin, time]
X = batch[:, :, :, :-1]
# price relative vector of the last period, [batch, norm_feature, coin]
y = batch[:, :, :, -1] / batch[:, 0, None, :, -2]
return {"X": X, "y": y, "last_w": last_w, "setw": setw}
@staticmethod
def _sample_geometric(start, end, bias):
"""
        Generate an index within [start, end) with geometrically decaying probability, favoring indices close to ``end``.
Args:
bias: A value in (0, 1).
"""
ran = np.random.geometric(bias)
while ran > end - start:
ran = np.random.geometric(bias)
result = end - ran
return result
@staticmethod
def _divide_data(period_num,
window_size,
test_portion,
val_portion,
portion_reversed):
"""
Divide training data into three portions, train, test and validation.
Args:
period_num: Number of price records in the time dimension.
window_size: Sliding window size of history price records
visible to the agent.
test_portion/val_portion: Percent of these two portions.
portion_reversed: Whether reverse the order of portions.
Returns:
Three np.ndarray type index arrays, train, test, validation.
"""
train_portion = 1 - test_portion - val_portion
indices = np.arange(period_num)
if portion_reversed:
split_point = np.array(
[val_portion, val_portion + test_portion]
)
split_idx = (split_point * period_num).astype(int)
val_idx, test_idx, train_idx = np.split(indices, split_idx)
else:
split_point = np.array(
[train_portion, train_portion + test_portion]
)
split_idx = (split_point * period_num).astype(int)
train_idx, test_idx, val_idx = np.split(indices, split_idx)
# truncate records in the last time window, otherwise we may
# sample insufficient samples when reaching the last window.
train_idx = train_idx[:-(window_size + 1)]
test_idx = test_idx[:-(window_size + 1)]
val_idx = val_idx[:-(window_size + 1)]
return train_idx, test_idx, val_idx
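    # Illustrative numeric sketch of the split above (assuming period_num=1000,
    # window_size=50, test_portion=0.15, val_portion=0.1, portions not reversed):
    # the split points land at indices 750 and 900, and each portion then drops its
    # last window_size + 1 = 51 indices, leaving roughly
    #   train: 0..698, test: 750..848, val: 900..948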
def buffer_init_helper(config, device, online=True, db_directory=None):
input_config = config["input"]
train_config = config["training"]
cdm, features = coin_data_manager_init_helper(
config, online=online, download=True, db_directory=db_directory
)
buffer = PGPBuffer(
features,
batch_size=train_config["batch_size"],
window_size=input_config["window_size"],
test_portion=input_config["test_portion"],
validation_portion=input_config["validation_portion"],
sample_bias=train_config["buffer_biased"],
portion_reversed=input_config["portion_reversed"],
device=device,
)
return cdm, buffer
|
the-stack_106_29420 | """The builtin dict implementation"""
from rpython.rlib import jit, rerased, objectmodel
from rpython.rlib.debug import mark_dict_non_null
from rpython.rlib.objectmodel import newlist_hint, r_dict, specialize
from rpython.tool.sourcetools import func_renamer, func_with_new_name
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import (
WrappedDefault, applevel, interp2app, unwrap_spec)
from pypy.interpreter.mixedmodule import MixedModule
from pypy.interpreter.signature import Signature
from pypy.interpreter.typedef import TypeDef
from pypy.objspace.std.util import negate
UNROLL_CUTOFF = 5
def _never_equal_to_string(space, w_lookup_type):
"""Handles the case of a non string key lookup.
Types that have a sane hash/eq function should allow us to return True
directly to signal that the key is not in the dict in any case.
XXX The types should provide such a flag. """
# XXX there are many more types
return (space.is_w(w_lookup_type, space.w_NoneType) or
space.is_w(w_lookup_type, space.w_int) or
space.is_w(w_lookup_type, space.w_bool) or
space.is_w(w_lookup_type, space.w_float))
@specialize.call_location()
def w_dict_unrolling_heuristic(w_dct):
"""In which cases iterating over dict items can be unrolled.
    Note that w_dct is an instance of W_DictMultiObject, not necessarily
an actual dict
"""
return jit.isvirtual(w_dct) or (jit.isconstant(w_dct) and
w_dct.length() <= UNROLL_CUTOFF)
class W_DictMultiObject(W_Root):
""" Abstract base class that does not store a strategy. """
__slots__ = ['space', 'dstorage']
def get_strategy(self):
raise NotImplementedError("abstract method")
def set_strategy(self, strategy):
raise NotImplementedError("abstract method")
@staticmethod
def allocate_and_init_instance(space, w_type=None, module=False,
instance=False, strdict=False,
kwargs=False):
if space.config.objspace.std.withcelldict and module:
from pypy.objspace.std.celldict import ModuleDictStrategy
assert w_type is None
# every module needs its own strategy, because the strategy stores
# the version tag
strategy = ModuleDictStrategy(space)
storage = strategy.get_empty_storage()
w_obj = space.allocate_instance(W_ModuleDictObject, space.w_dict)
W_ModuleDictObject.__init__(w_obj, space, strategy, storage)
return w_obj
elif space.config.objspace.std.withmapdict and instance:
from pypy.objspace.std.mapdict import MapDictStrategy
strategy = space.fromcache(MapDictStrategy)
elif instance or strdict or module:
assert w_type is None
strategy = space.fromcache(BytesDictStrategy)
elif kwargs:
assert w_type is None
from pypy.objspace.std.kwargsdict import EmptyKwargsDictStrategy
strategy = space.fromcache(EmptyKwargsDictStrategy)
else:
strategy = space.fromcache(EmptyDictStrategy)
if w_type is None:
w_type = space.w_dict
storage = strategy.get_empty_storage()
w_obj = space.allocate_instance(W_DictObject, w_type)
W_DictObject.__init__(w_obj, space, strategy, storage)
return w_obj
def __init__(self, space, storage):
self.space = space
self.dstorage = storage
def __repr__(self):
"""representation for debugging purposes"""
return "%s(%s)" % (self.__class__.__name__, self.get_strategy())
def unwrap(w_dict, space):
result = {}
items = w_dict.items()
for w_pair in items:
key, val = space.unwrap(w_pair)
result[key] = val
return result
def missing_method(w_dict, space, w_key):
if not space.is_w(space.type(w_dict), space.w_dict):
w_missing = space.lookup(w_dict, '__missing__')
if w_missing is not None:
return space.get_and_call_function(w_missing, w_dict, w_key)
return None
def initialize_content(self, list_pairs_w):
for w_k, w_v in list_pairs_w:
self.setitem(w_k, w_v)
def setitem_str(self, key, w_value):
self.get_strategy().setitem_str(self, key, w_value)
@staticmethod
def descr_new(space, w_dicttype, __args__):
w_obj = W_DictMultiObject.allocate_and_init_instance(space, w_dicttype)
return w_obj
@staticmethod
def descr_fromkeys(space, w_type, w_keys, w_fill=None):
if w_fill is None:
w_fill = space.w_None
if space.is_w(w_type, space.w_dict):
w_dict = W_DictMultiObject.allocate_and_init_instance(space,
w_type)
byteslist = space.listview_bytes(w_keys)
if byteslist is not None:
for key in byteslist:
w_dict.setitem_str(key, w_fill)
else:
for w_key in space.listview(w_keys):
w_dict.setitem(w_key, w_fill)
else:
w_dict = space.call_function(w_type)
for w_key in space.listview(w_keys):
space.setitem(w_dict, w_key, w_fill)
return w_dict
def descr_init(self, space, __args__):
init_or_update(space, self, __args__, 'dict')
def descr_repr(self, space):
ec = space.getexecutioncontext()
w_currently_in_repr = ec._py_repr
if w_currently_in_repr is None:
w_currently_in_repr = ec._py_repr = space.newdict()
return dictrepr(space, w_currently_in_repr, self)
def descr_eq(self, space, w_other):
if space.is_w(self, w_other):
return space.w_True
if not isinstance(w_other, W_DictMultiObject):
return space.w_NotImplemented
if self.length() != w_other.length():
return space.w_False
iteratorimplementation = self.iteritems()
while True:
w_key, w_val = iteratorimplementation.next_item()
if w_key is None:
break
w_rightval = w_other.getitem(w_key)
if w_rightval is None:
return space.w_False
if not space.eq_w(w_val, w_rightval):
return space.w_False
return space.w_True
def descr_lt(self, space, w_other):
if not isinstance(w_other, W_DictMultiObject):
return space.w_NotImplemented
return self._compare_lt(space, w_other)
def descr_gt(self, space, w_other):
if not isinstance(w_other, W_DictMultiObject):
return space.w_NotImplemented
return w_other._compare_lt(space, self)
def _compare_lt(self, space, w_other):
# Different sizes, no problem
if self.length() < w_other.length():
return space.w_True
if self.length() > w_other.length():
return space.w_False
# Same size
w_leftdiff, w_leftval = characterize(space, self, w_other)
if w_leftdiff is None:
return space.w_False
w_rightdiff, w_rightval = characterize(space, w_other, self)
if w_rightdiff is None:
# w_leftdiff is not None, w_rightdiff is None
return space.w_True
w_res = space.lt(w_leftdiff, w_rightdiff)
if (not space.is_true(w_res) and
space.eq_w(w_leftdiff, w_rightdiff) and
w_rightval is not None):
w_res = space.lt(w_leftval, w_rightval)
return w_res
descr_ne = negate(descr_eq)
descr_le = negate(descr_gt)
descr_ge = negate(descr_lt)
def descr_len(self, space):
return space.wrap(self.length())
def descr_iter(self, space):
return W_DictMultiIterKeysObject(space, self.iterkeys())
def descr_contains(self, space, w_key):
return space.newbool(self.getitem(w_key) is not None)
def descr_getitem(self, space, w_key):
w_value = self.getitem(w_key)
if w_value is not None:
return w_value
w_missing_item = self.missing_method(space, w_key)
if w_missing_item is not None:
return w_missing_item
space.raise_key_error(w_key)
def descr_setitem(self, space, w_newkey, w_newvalue):
self.setitem(w_newkey, w_newvalue)
def descr_delitem(self, space, w_key):
try:
self.delitem(w_key)
except KeyError:
space.raise_key_error(w_key)
def descr_reversed(self, space):
raise oefmt(space.w_TypeError,
"argument to reversed() must be a sequence")
def descr_copy(self, space):
"""D.copy() -> a shallow copy of D"""
w_new = W_DictMultiObject.allocate_and_init_instance(space)
update1_dict_dict(space, w_new, self)
return w_new
def descr_items(self, space):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples"""
return space.newlist(self.items())
def descr_keys(self, space):
"""D.keys() -> list of D's keys"""
return self.w_keys()
def descr_values(self, space):
"""D.values() -> list of D's values"""
return space.newlist(self.values())
def descr_iteritems(self, space):
"""D.iteritems() -> an iterator over the (key, value) items of D"""
return W_DictMultiIterItemsObject(space, self.iteritems())
def descr_iterkeys(self, space):
"""D.iterkeys() -> an iterator over the keys of D"""
return W_DictMultiIterKeysObject(space, self.iterkeys())
def descr_itervalues(self, space):
"""D.itervalues() -> an iterator over the values of D"""
return W_DictMultiIterValuesObject(space, self.itervalues())
def nondescr_reversed_dict(self, space):
"""Not exposed directly to app-level, but via __pypy__.reversed_dict().
"""
strategy = self.get_strategy()
if strategy.has_iterreversed:
it = strategy.iterreversed(self)
return W_DictMultiIterKeysObject(space, it)
else:
# fall-back
w_keys = self.w_keys()
return space.call_method(w_keys, '__reversed__')
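    # App-level usage sketch (PyPy-specific, illustrative only):
    #     import __pypy__
    #     for key in __pypy__.reversed_dict(d):
    #         ...
    # Strategies without native reverse iteration fall back to reversing the key list.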
def descr_viewitems(self, space):
"""D.viewitems() -> a set-like object providing a view on D's items"""
return W_DictViewItemsObject(space, self)
def descr_viewkeys(self, space):
"""D.viewkeys() -> a set-like object providing a view on D's keys"""
return W_DictViewKeysObject(space, self)
def descr_viewvalues(self, space):
"""D.viewvalues() -> an object providing a view on D's values"""
return W_DictViewValuesObject(space, self)
def descr_has_key(self, space, w_key):
"""D.has_key(k) -> True if D has a key k, else False"""
return space.newbool(self.getitem(w_key) is not None)
def descr_clear(self, space):
"""D.clear() -> None. Remove all items from D."""
self.clear()
@unwrap_spec(w_default=WrappedDefault(None))
def descr_get(self, space, w_key, w_default):
"""D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
w_value = self.getitem(w_key)
return w_value if w_value is not None else w_default
@unwrap_spec(defaults_w='args_w')
def descr_pop(self, space, w_key, defaults_w):
"""D.pop(k[,d]) -> v, remove specified key and return the
corresponding value\nIf key is not found, d is returned if given,
otherwise KeyError is raised
"""
len_defaults = len(defaults_w)
if len_defaults > 1:
raise oefmt(space.w_TypeError,
"pop expected at most 2 arguments, got %d",
1 + len_defaults)
w_item = self.getitem(w_key)
if w_item is None:
if len_defaults > 0:
return defaults_w[0]
else:
space.raise_key_error(w_key)
else:
self.delitem(w_key)
return w_item
def descr_popitem(self, space):
"""D.popitem() -> (k, v), remove and return some (key, value) pair as
a\n2-tuple; but raise KeyError if D is empty"""
try:
w_key, w_value = self.popitem()
except KeyError:
raise oefmt(space.w_KeyError, "popitem(): dictionary is empty")
return space.newtuple([w_key, w_value])
@unwrap_spec(w_default=WrappedDefault(None))
def descr_setdefault(self, space, w_key, w_default):
"""D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D"""
return self.setdefault(w_key, w_default)
def descr_update(self, space, __args__):
"""D.update(E, **F) -> None. Update D from E and F: for k in E: D[k]
= E[k]\n(if E has keys else: for (k, v) in E: D[k] = v) then: for k in
F: D[k] = F[k]"""
init_or_update(space, self, __args__, 'dict.update')
def ensure_object_strategy(self): # for cpyext
object_strategy = self.space.fromcache(ObjectDictStrategy)
strategy = self.get_strategy()
if strategy is not object_strategy:
strategy.switch_to_object_strategy(self)
class W_DictObject(W_DictMultiObject):
""" a regular dict object """
__slots__ = ['dstrategy']
def __init__(self, space, strategy, storage):
W_DictMultiObject.__init__(self, space, storage)
self.dstrategy = strategy
def get_strategy(self):
return self.dstrategy
def set_strategy(self, strategy):
self.dstrategy = strategy
class W_ModuleDictObject(W_DictMultiObject):
""" a dict object for a module, that is not expected to change. It stores
the strategy as a quasi-immutable field. """
__slots__ = ['mstrategy']
_immutable_fields_ = ['mstrategy?']
def __init__(self, space, strategy, storage):
W_DictMultiObject.__init__(self, space, storage)
self.mstrategy = strategy
def get_strategy(self):
return self.mstrategy
def set_strategy(self, strategy):
self.mstrategy = strategy
def _add_indirections():
dict_methods = "getitem getitem_str setitem setdefault \
popitem delitem clear \
length w_keys values items \
iterkeys itervalues iteritems \
listview_bytes listview_unicode listview_int \
view_as_kwargs".split()
def make_method(method):
def f(self, *args):
return getattr(self.get_strategy(), method)(self, *args)
f.func_name = method
return f
for method in dict_methods:
setattr(W_DictMultiObject, method, make_method(method))
_add_indirections()
app = applevel('''
def dictrepr(currently_in_repr, d):
if len(d) == 0:
return "{}"
dict_id = id(d)
if dict_id in currently_in_repr:
return '{...}'
currently_in_repr[dict_id] = 1
try:
items = []
# XXX for now, we cannot use iteritems() at app-level because
# we want a reasonable result instead of a RuntimeError
# even if the dict is mutated by the repr() in the loop.
for k, v in dict.items(d):
items.append(repr(k) + ": " + repr(v))
return "{" + ', '.join(items) + "}"
finally:
try:
del currently_in_repr[dict_id]
except:
pass
''', filename=__file__)
dictrepr = app.interphook("dictrepr")
W_DictMultiObject.typedef = TypeDef("dict",
__doc__ = '''dict() -> new empty dictionary.
dict(mapping) -> new dictionary initialized from a mapping object\'s
(key, value) pairs.
dict(seq) -> new dictionary initialized as if via:
d = {}
for k, v in seq:
d[k] = v
dict(**kwargs) -> new dictionary initialized with the name=value pairs
in the keyword argument list. For example: dict(one=1, two=2)''',
__new__ = interp2app(W_DictMultiObject.descr_new),
fromkeys = interp2app(W_DictMultiObject.descr_fromkeys,
as_classmethod=True),
__hash__ = None,
__repr__ = interp2app(W_DictMultiObject.descr_repr),
__init__ = interp2app(W_DictMultiObject.descr_init),
__eq__ = interp2app(W_DictMultiObject.descr_eq),
__ne__ = interp2app(W_DictMultiObject.descr_ne),
__lt__ = interp2app(W_DictMultiObject.descr_lt),
__le__ = interp2app(W_DictMultiObject.descr_le),
__gt__ = interp2app(W_DictMultiObject.descr_gt),
__ge__ = interp2app(W_DictMultiObject.descr_ge),
__len__ = interp2app(W_DictMultiObject.descr_len),
__iter__ = interp2app(W_DictMultiObject.descr_iter),
__contains__ = interp2app(W_DictMultiObject.descr_contains),
__getitem__ = interp2app(W_DictMultiObject.descr_getitem),
__setitem__ = interp2app(W_DictMultiObject.descr_setitem),
__delitem__ = interp2app(W_DictMultiObject.descr_delitem),
__reversed__ = interp2app(W_DictMultiObject.descr_reversed),
copy = interp2app(W_DictMultiObject.descr_copy),
items = interp2app(W_DictMultiObject.descr_items),
keys = interp2app(W_DictMultiObject.descr_keys),
values = interp2app(W_DictMultiObject.descr_values),
iteritems = interp2app(W_DictMultiObject.descr_iteritems),
iterkeys = interp2app(W_DictMultiObject.descr_iterkeys),
itervalues = interp2app(W_DictMultiObject.descr_itervalues),
viewkeys = interp2app(W_DictMultiObject.descr_viewkeys),
viewitems = interp2app(W_DictMultiObject.descr_viewitems),
viewvalues = interp2app(W_DictMultiObject.descr_viewvalues),
has_key = interp2app(W_DictMultiObject.descr_has_key),
clear = interp2app(W_DictMultiObject.descr_clear),
get = interp2app(W_DictMultiObject.descr_get),
pop = interp2app(W_DictMultiObject.descr_pop),
popitem = interp2app(W_DictMultiObject.descr_popitem),
setdefault = interp2app(W_DictMultiObject.descr_setdefault),
update = interp2app(W_DictMultiObject.descr_update),
)
class DictStrategy(object):
def __init__(self, space):
self.space = space
def get_empty_storage(self):
raise NotImplementedError
@jit.look_inside_iff(lambda self, w_dict:
w_dict_unrolling_heuristic(w_dict))
def w_keys(self, w_dict):
iterator = self.iterkeys(w_dict)
result = newlist_hint(self.length(w_dict))
while True:
w_key = iterator.next_key()
if w_key is not None:
result.append(w_key)
else:
return self.space.newlist(result)
def values(self, w_dict):
iterator = self.itervalues(w_dict)
result = newlist_hint(self.length(w_dict))
while True:
w_value = iterator.next_value()
if w_value is not None:
result.append(w_value)
else:
return result
def items(self, w_dict):
iterator = self.iteritems(w_dict)
result = newlist_hint(self.length(w_dict))
while True:
w_key, w_value = iterator.next_item()
if w_key is not None:
result.append(self.space.newtuple([w_key, w_value]))
else:
return result
def popitem(self, w_dict):
# this is a bad implementation: if we call popitem() repeatedly,
# it ends up taking n**2 time, because the next() calls below
# will take longer and longer. But all interesting strategies
# provide a better one.
iterator = self.iteritems(w_dict)
w_key, w_value = iterator.next_item()
if w_key is None:
raise KeyError
self.delitem(w_dict, w_key)
return (w_key, w_value)
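    # Cost note for the fallback above: every call builds a fresh iterator whose first
    # next_item() must skip over more and more already-deleted entries, so draining an
    # N-item dict this way costs roughly 1 + 2 + ... + N steps, i.e. O(N**2) overall.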
def clear(self, w_dict):
strategy = self.space.fromcache(EmptyDictStrategy)
storage = strategy.get_empty_storage()
w_dict.set_strategy(strategy)
w_dict.dstorage = storage
def listview_bytes(self, w_dict):
return None
def listview_unicode(self, w_dict):
return None
def listview_int(self, w_dict):
return None
def view_as_kwargs(self, w_dict):
return (None, None)
def getiterkeys(self, w_dict):
raise NotImplementedError
def getitervalues(self, w_dict):
raise NotImplementedError
def getiteritems_with_hash(self, w_dict):
raise NotImplementedError
has_iterreversed = False
# no 'getiterreversed': no default implementation available
def rev_update1_dict_dict(self, w_dict, w_updatedict):
iteritems = self.iteritems(w_dict)
while True:
w_key, w_value = iteritems.next_item()
if w_key is None:
break
w_updatedict.setitem(w_key, w_value)
def prepare_update(self, w_dict, num_extra):
pass
class EmptyDictStrategy(DictStrategy):
erase, unerase = rerased.new_erasing_pair("empty")
erase = staticmethod(erase)
unerase = staticmethod(unerase)
def get_empty_storage(self):
return self.erase(None)
def switch_to_correct_strategy(self, w_dict, w_key):
withidentitydict = self.space.config.objspace.std.withidentitydict
if type(w_key) is self.space.StringObjectCls:
self.switch_to_bytes_strategy(w_dict)
return
elif type(w_key) is self.space.UnicodeObjectCls:
self.switch_to_unicode_strategy(w_dict)
return
w_type = self.space.type(w_key)
if self.space.is_w(w_type, self.space.w_int):
self.switch_to_int_strategy(w_dict)
elif withidentitydict and w_type.compares_by_identity():
self.switch_to_identity_strategy(w_dict)
else:
self.switch_to_object_strategy(w_dict)
def switch_to_bytes_strategy(self, w_dict):
strategy = self.space.fromcache(BytesDictStrategy)
storage = strategy.get_empty_storage()
w_dict.set_strategy(strategy)
w_dict.dstorage = storage
def switch_to_unicode_strategy(self, w_dict):
strategy = self.space.fromcache(UnicodeDictStrategy)
storage = strategy.get_empty_storage()
w_dict.set_strategy(strategy)
w_dict.dstorage = storage
def switch_to_int_strategy(self, w_dict):
strategy = self.space.fromcache(IntDictStrategy)
storage = strategy.get_empty_storage()
w_dict.set_strategy(strategy)
w_dict.dstorage = storage
def switch_to_identity_strategy(self, w_dict):
from pypy.objspace.std.identitydict import IdentityDictStrategy
strategy = self.space.fromcache(IdentityDictStrategy)
storage = strategy.get_empty_storage()
w_dict.set_strategy(strategy)
w_dict.dstorage = storage
def switch_to_object_strategy(self, w_dict):
strategy = self.space.fromcache(ObjectDictStrategy)
storage = strategy.get_empty_storage()
w_dict.set_strategy(strategy)
w_dict.dstorage = storage
def getitem(self, w_dict, w_key):
#return w_value or None
# in case the key is unhashable, try to hash it
self.space.hash(w_key)
# return None anyway
return None
def getitem_str(self, w_dict, key):
#return w_value or None
return None
def setdefault(self, w_dict, w_key, w_default):
# here the dict is always empty
self.switch_to_correct_strategy(w_dict, w_key)
w_dict.setitem(w_key, w_default)
return w_default
def setitem(self, w_dict, w_key, w_value):
self.switch_to_correct_strategy(w_dict, w_key)
w_dict.setitem(w_key, w_value)
def setitem_str(self, w_dict, key, w_value):
self.switch_to_bytes_strategy(w_dict)
w_dict.setitem_str(key, w_value)
def delitem(self, w_dict, w_key):
# in case the key is unhashable, try to hash it
self.space.hash(w_key)
raise KeyError
def length(self, w_dict):
return 0
def clear(self, w_dict):
return
def popitem(self, w_dict):
raise KeyError
def view_as_kwargs(self, w_dict):
return ([], [])
# ---------- iterator interface ----------------
def getiterkeys(self, w_dict):
return iter([])
def getitervalues(self, w_dict):
return iter([])
def getiteritems_with_hash(self, w_dict):
return iter([])
def getiterreversed(self, w_dict):
return iter([])
# Iterator Implementation base classes
def _new_next(TP):
if TP in ('key', 'value'):
EMPTY = None
else:
EMPTY = None, None
def next(self):
if self.dictimplementation is None:
return EMPTY
space = self.space
if self.len != self.dictimplementation.length():
self.len = -1 # Make this error state sticky
raise oefmt(space.w_RuntimeError,
"dictionary changed size during iteration")
# look for the next entry
if self.pos < self.len:
result = getattr(self, 'next_' + TP + '_entry')()
self.pos += 1
if self.strategy is self.dictimplementation.get_strategy():
return result # common case
else:
# waaa, obscure case: the strategy changed, but not the
# length of the dict. The (key, value) pair in 'result'
# might be out-of-date. We try to explicitly look up
# the key in the dict.
if TP == 'key' or TP == 'value':
return result
w_key = result[0]
w_value = self.dictimplementation.getitem(w_key)
if w_value is None:
self.len = -1 # Make this error state sticky
raise oefmt(space.w_RuntimeError,
"dictionary changed during iteration")
return (w_key, w_value)
# no more entries
self.dictimplementation = None
return EMPTY
return func_with_new_name(next, 'next_' + TP)
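# The generated next_*() methods follow CPython's dict iteration contract: a length
# change during iteration raises RuntimeError. A same-length strategy switch is
# tolerated: key/value iterators return the possibly stale entry as-is, while item
# iterators re-look up the key and raise RuntimeError if it has disappeared.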
class BaseIteratorImplementation(object):
def __init__(self, space, strategy, implementation):
self.space = space
self.strategy = strategy
self.dictimplementation = implementation
self.len = implementation.length()
self.pos = 0
def length(self):
if self.dictimplementation is not None and self.len != -1:
return self.len - self.pos
return 0
def _cleanup_(self):
raise Exception("seeing a prebuilt %r object" % (
self.__class__,))
class BaseKeyIterator(BaseIteratorImplementation):
next_key = _new_next('key')
class BaseValueIterator(BaseIteratorImplementation):
next_value = _new_next('value')
class BaseItemIterator(BaseIteratorImplementation):
next_item = _new_next('item')
def create_iterator_classes(dictimpl):
if not hasattr(dictimpl, 'wrapkey'):
wrapkey = lambda space, key: key
else:
wrapkey = dictimpl.wrapkey.im_func
if not hasattr(dictimpl, 'wrapvalue'):
wrapvalue = lambda space, value: value
else:
wrapvalue = dictimpl.wrapvalue.im_func
if not hasattr(dictimpl, 'setitem_untyped'):
setitem_untyped = None
else:
setitem_untyped = dictimpl.setitem_untyped.im_func
setitem_untyped = func_with_new_name(setitem_untyped,
'setitem_untyped_%s' % dictimpl.__name__)
class IterClassKeys(BaseKeyIterator):
def __init__(self, space, strategy, impl):
self.iterator = strategy.getiterkeys(impl)
BaseIteratorImplementation.__init__(self, space, strategy, impl)
def next_key_entry(self):
for key in self.iterator:
return wrapkey(self.space, key)
else:
return None
class IterClassValues(BaseValueIterator):
def __init__(self, space, strategy, impl):
self.iterator = strategy.getitervalues(impl)
BaseIteratorImplementation.__init__(self, space, strategy, impl)
def next_value_entry(self):
for value in self.iterator:
return wrapvalue(self.space, value)
else:
return None
class IterClassItems(BaseItemIterator):
def __init__(self, space, strategy, impl):
self.iterator = strategy.getiteritems_with_hash(impl)
BaseIteratorImplementation.__init__(self, space, strategy, impl)
def next_item_entry(self):
for key, value, keyhash in self.iterator:
return (wrapkey(self.space, key),
wrapvalue(self.space, value))
else:
return None, None
class IterClassReversed(BaseKeyIterator):
def __init__(self, space, strategy, impl):
self.iterator = strategy.getiterreversed(impl)
BaseIteratorImplementation.__init__(self, space, strategy, impl)
def next_key_entry(self):
for key in self.iterator:
return wrapkey(self.space, key)
else:
return None
def iterkeys(self, w_dict):
return IterClassKeys(self.space, self, w_dict)
def itervalues(self, w_dict):
return IterClassValues(self.space, self, w_dict)
def iteritems(self, w_dict):
return IterClassItems(self.space, self, w_dict)
if hasattr(dictimpl, 'getiterreversed'):
def iterreversed(self, w_dict):
return IterClassReversed(self.space, self, w_dict)
dictimpl.iterreversed = iterreversed
dictimpl.has_iterreversed = True
@jit.look_inside_iff(lambda self, w_dict, w_updatedict:
w_dict_unrolling_heuristic(w_dict))
def rev_update1_dict_dict(self, w_dict, w_updatedict):
# the logic is to call prepare_dict_update() after the first setitem():
# it gives the w_updatedict a chance to switch its strategy.
if 1: # (preserve indentation)
iteritemsh = self.getiteritems_with_hash(w_dict)
if not same_strategy(self, w_updatedict):
# Different strategy. Try to copy one item of w_dict
for key, value, keyhash in iteritemsh:
w_key = wrapkey(self.space, key)
w_value = wrapvalue(self.space, value)
w_updatedict.setitem(w_key, w_value)
break
else:
return # w_dict is completely empty, nothing to do
count = w_dict.length() - 1
w_updatedict.get_strategy().prepare_update(w_updatedict, count)
# If the strategy is still different, continue the slow way
if not same_strategy(self, w_updatedict):
for key, value, keyhash in iteritemsh:
w_key = wrapkey(self.space, key)
w_value = wrapvalue(self.space, value)
w_updatedict.setitem(w_key, w_value)
return # done
else:
# Same strategy.
self.prepare_update(w_updatedict, w_dict.length())
#
# Use setitem_untyped() to speed up copying without
# wrapping/unwrapping the key.
assert setitem_untyped is not None
dstorage = w_updatedict.dstorage
for key, value, keyhash in iteritemsh:
setitem_untyped(self, dstorage, key, value, keyhash)
def same_strategy(self, w_otherdict):
return (setitem_untyped is not None and
w_otherdict.get_strategy() is self)
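    # Summary of rev_update1_dict_dict above: if the destination ends up sharing this
    # strategy (and the strategy defines setitem_untyped), items are copied unwrapped
    # together with their cached hashes; otherwise each item is wrapped and inserted
    # through the generic setitem() path.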
dictimpl.iterkeys = iterkeys
dictimpl.itervalues = itervalues
dictimpl.iteritems = iteritems
dictimpl.rev_update1_dict_dict = rev_update1_dict_dict
create_iterator_classes(EmptyDictStrategy)
# concrete subclasses of the above
class AbstractTypedStrategy(object):
_mixin_ = True
@staticmethod
def erase(storage):
raise NotImplementedError("abstract base class")
@staticmethod
def unerase(obj):
raise NotImplementedError("abstract base class")
def wrap(self, unwrapped):
raise NotImplementedError
def unwrap(self, wrapped):
raise NotImplementedError
def is_correct_type(self, w_obj):
raise NotImplementedError("abstract base class")
def get_empty_storage(self):
raise NotImplementedError("abstract base class")
def _never_equal_to(self, w_lookup_type):
raise NotImplementedError("abstract base class")
def setitem(self, w_dict, w_key, w_value):
if self.is_correct_type(w_key):
self.unerase(w_dict.dstorage)[self.unwrap(w_key)] = w_value
return
else:
self.switch_to_object_strategy(w_dict)
w_dict.setitem(w_key, w_value)
def setitem_str(self, w_dict, key, w_value):
self.switch_to_object_strategy(w_dict)
w_dict.setitem(self.space.wrap(key), w_value)
def setdefault(self, w_dict, w_key, w_default):
if self.is_correct_type(w_key):
return self.unerase(w_dict.dstorage).setdefault(self.unwrap(w_key),
w_default)
else:
self.switch_to_object_strategy(w_dict)
return w_dict.setdefault(w_key, w_default)
def delitem(self, w_dict, w_key):
if self.is_correct_type(w_key):
del self.unerase(w_dict.dstorage)[self.unwrap(w_key)]
return
else:
self.switch_to_object_strategy(w_dict)
return w_dict.delitem(w_key)
def length(self, w_dict):
return len(self.unerase(w_dict.dstorage))
def getitem_str(self, w_dict, key):
return self.getitem(w_dict, self.space.wrap(key))
def getitem(self, w_dict, w_key):
space = self.space
if self.is_correct_type(w_key):
return self.unerase(w_dict.dstorage).get(self.unwrap(w_key), None)
elif self._never_equal_to(space.type(w_key)):
return None
else:
self.switch_to_object_strategy(w_dict)
return w_dict.getitem(w_key)
def w_keys(self, w_dict):
l = [self.wrap(key)
for key in self.unerase(w_dict.dstorage).iterkeys()]
return self.space.newlist(l)
def values(self, w_dict):
return self.unerase(w_dict.dstorage).values()
def items(self, w_dict):
space = self.space
dict_w = self.unerase(w_dict.dstorage)
return [space.newtuple([self.wrap(key), w_value])
for (key, w_value) in dict_w.iteritems()]
def popitem(self, w_dict):
key, value = self.unerase(w_dict.dstorage).popitem()
return (self.wrap(key), value)
def clear(self, w_dict):
self.unerase(w_dict.dstorage).clear()
def switch_to_object_strategy(self, w_dict):
d = self.unerase(w_dict.dstorage)
strategy = self.space.fromcache(ObjectDictStrategy)
d_new = strategy.unerase(strategy.get_empty_storage())
for key, value in d.iteritems():
d_new[self.wrap(key)] = value
w_dict.set_strategy(strategy)
w_dict.dstorage = strategy.erase(d_new)
# --------------- iterator interface -----------------
def getiterkeys(self, w_dict):
return self.unerase(w_dict.dstorage).iterkeys()
def getitervalues(self, w_dict):
return self.unerase(w_dict.dstorage).itervalues()
def getiteritems_with_hash(self, w_dict):
return objectmodel.iteritems_with_hash(self.unerase(w_dict.dstorage))
def getiterreversed(self, w_dict):
return objectmodel.reversed_dict(self.unerase(w_dict.dstorage))
def prepare_update(self, w_dict, num_extra):
objectmodel.prepare_dict_update(self.unerase(w_dict.dstorage),
num_extra)
def setitem_untyped(self, dstorage, key, w_value, keyhash):
d = self.unerase(dstorage)
objectmodel.setitem_with_hash(d, key, keyhash, w_value)
class ObjectDictStrategy(AbstractTypedStrategy, DictStrategy):
erase, unerase = rerased.new_erasing_pair("object")
erase = staticmethod(erase)
unerase = staticmethod(unerase)
def wrap(self, unwrapped):
return unwrapped
def unwrap(self, wrapped):
return wrapped
def is_correct_type(self, w_obj):
return True
def get_empty_storage(self):
new_dict = r_dict(self.space.eq_w, self.space.hash_w,
force_non_null=True)
return self.erase(new_dict)
def _never_equal_to(self, w_lookup_type):
return False
def w_keys(self, w_dict):
return self.space.newlist(self.unerase(w_dict.dstorage).keys())
def setitem_str(self, w_dict, s, w_value):
self.setitem(w_dict, self.space.wrap(s), w_value)
def switch_to_object_strategy(self, w_dict):
assert 0, "should be unreachable"
create_iterator_classes(ObjectDictStrategy)
class BytesDictStrategy(AbstractTypedStrategy, DictStrategy):
erase, unerase = rerased.new_erasing_pair("bytes")
erase = staticmethod(erase)
unerase = staticmethod(unerase)
def wrap(self, unwrapped):
return self.space.wrap(unwrapped)
def unwrap(self, wrapped):
return self.space.str_w(wrapped)
def is_correct_type(self, w_obj):
space = self.space
return space.is_w(space.type(w_obj), space.w_str)
def get_empty_storage(self):
res = {}
mark_dict_non_null(res)
return self.erase(res)
def _never_equal_to(self, w_lookup_type):
return _never_equal_to_string(self.space, w_lookup_type)
def setitem_str(self, w_dict, key, w_value):
assert key is not None
self.unerase(w_dict.dstorage)[key] = w_value
def getitem(self, w_dict, w_key):
space = self.space
# -- This is called extremely often. Hack for performance --
if type(w_key) is space.StringObjectCls:
return self.getitem_str(w_dict, w_key.unwrap(space))
# -- End of performance hack --
return AbstractTypedStrategy.getitem(self, w_dict, w_key)
def getitem_str(self, w_dict, key):
assert key is not None
return self.unerase(w_dict.dstorage).get(key, None)
def listview_bytes(self, w_dict):
return self.unerase(w_dict.dstorage).keys()
def w_keys(self, w_dict):
return self.space.newlist_bytes(self.listview_bytes(w_dict))
def wrapkey(space, key):
return space.wrap(key)
@jit.look_inside_iff(lambda self, w_dict:
w_dict_unrolling_heuristic(w_dict))
def view_as_kwargs(self, w_dict):
d = self.unerase(w_dict.dstorage)
l = len(d)
keys, values = [None] * l, [None] * l
i = 0
for key, val in d.iteritems():
keys[i] = key
values[i] = val
i += 1
return keys, values
create_iterator_classes(BytesDictStrategy)
class UnicodeDictStrategy(AbstractTypedStrategy, DictStrategy):
erase, unerase = rerased.new_erasing_pair("unicode")
erase = staticmethod(erase)
unerase = staticmethod(unerase)
def wrap(self, unwrapped):
return self.space.wrap(unwrapped)
def unwrap(self, wrapped):
return self.space.unicode_w(wrapped)
def is_correct_type(self, w_obj):
space = self.space
return space.is_w(space.type(w_obj), space.w_unicode)
def get_empty_storage(self):
res = {}
mark_dict_non_null(res)
return self.erase(res)
def _never_equal_to(self, w_lookup_type):
return _never_equal_to_string(self.space, w_lookup_type)
# we should implement the same shortcuts as we do for BytesDictStrategy
## def setitem_str(self, w_dict, key, w_value):
## assert key is not None
## self.unerase(w_dict.dstorage)[key] = w_value
## def getitem(self, w_dict, w_key):
## space = self.space
## # -- This is called extremely often. Hack for performance --
## if type(w_key) is space.StringObjectCls:
## return self.getitem_str(w_dict, w_key.unwrap(space))
## # -- End of performance hack --
## return AbstractTypedStrategy.getitem(self, w_dict, w_key)
## def getitem_str(self, w_dict, key):
## assert key is not None
## return self.unerase(w_dict.dstorage).get(key, None)
def listview_unicode(self, w_dict):
return self.unerase(w_dict.dstorage).keys()
## def w_keys(self, w_dict):
## return self.space.newlist_bytes(self.listview_bytes(w_dict))
def wrapkey(space, key):
return space.wrap(key)
## @jit.look_inside_iff(lambda self, w_dict:
## w_dict_unrolling_heuristic(w_dict))
## def view_as_kwargs(self, w_dict):
## d = self.unerase(w_dict.dstorage)
## l = len(d)
## keys, values = [None] * l, [None] * l
## i = 0
## for key, val in d.iteritems():
## keys[i] = key
## values[i] = val
## i += 1
## return keys, values
create_iterator_classes(UnicodeDictStrategy)
class IntDictStrategy(AbstractTypedStrategy, DictStrategy):
erase, unerase = rerased.new_erasing_pair("int")
erase = staticmethod(erase)
unerase = staticmethod(unerase)
def wrap(self, unwrapped):
return self.space.wrap(unwrapped)
def unwrap(self, wrapped):
return self.space.int_w(wrapped)
def get_empty_storage(self):
return self.erase({})
def is_correct_type(self, w_obj):
space = self.space
return space.is_w(space.type(w_obj), space.w_int)
def _never_equal_to(self, w_lookup_type):
space = self.space
# XXX there are many more types
return (space.is_w(w_lookup_type, space.w_NoneType) or
space.is_w(w_lookup_type, space.w_str) or
space.is_w(w_lookup_type, space.w_unicode)
)
def listview_int(self, w_dict):
return self.unerase(w_dict.dstorage).keys()
def wrapkey(space, key):
return space.wrap(key)
def w_keys(self, w_dict):
return self.space.newlist_int(self.listview_int(w_dict))
create_iterator_classes(IntDictStrategy)
def update1(space, w_dict, w_data):
if isinstance(w_data, W_DictMultiObject): # optimization case only
update1_dict_dict(space, w_dict, w_data)
return
w_method = space.findattr(w_data, space.wrap("keys"))
if w_method is None:
# no 'keys' method, so we assume it is a sequence of pairs
data_w = space.listview(w_data)
update1_pairs(space, w_dict, data_w)
else:
# general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])"
data_w = space.listview(space.call_function(w_method))
update1_keys(space, w_dict, w_data, data_w)
def update1_dict_dict(space, w_dict, w_data):
w_data.get_strategy().rev_update1_dict_dict(w_data, w_dict)
def update1_pairs(space, w_dict, data_w):
for w_pair in data_w:
pair = space.fixedview(w_pair)
if len(pair) != 2:
raise oefmt(space.w_ValueError, "sequence of pairs expected")
w_key, w_value = pair
w_dict.setitem(w_key, w_value)
def update1_keys(space, w_dict, w_data, data_w):
for w_key in data_w:
w_value = space.getitem(w_data, w_key)
w_dict.setitem(w_key, w_value)
init_signature = Signature(['seq_or_map'], None, 'kwargs')
init_defaults = [None]
def init_or_update(space, w_dict, __args__, funcname):
w_src, w_kwds = __args__.parse_obj(
None, funcname,
init_signature, # signature
init_defaults) # default argument
if w_src is not None:
update1(space, w_dict, w_src)
if space.is_true(w_kwds):
update1(space, w_dict, w_kwds)
def characterize(space, w_a, w_b):
"""(similar to CPython)
    returns the smallest key of w_a for which w_b's value is different
    or absent, together with that value"""
w_smallest_diff_a_key = None
w_its_value = None
iteratorimplementation = w_a.iteritems()
while True:
w_key, w_val = iteratorimplementation.next_item()
if w_key is None:
break
if w_smallest_diff_a_key is None or space.is_true(space.lt(
w_key, w_smallest_diff_a_key)):
w_bvalue = w_b.getitem(w_key)
if w_bvalue is None:
w_its_value = w_val
w_smallest_diff_a_key = w_key
else:
if not space.eq_w(w_val, w_bvalue):
w_its_value = w_val
w_smallest_diff_a_key = w_key
return w_smallest_diff_a_key, w_its_value
# ____________________________________________________________
# Iteration
class W_BaseDictMultiIterObject(W_Root):
_immutable_fields_ = ["iteratorimplementation"]
def __init__(self, space, iteratorimplementation):
self.space = space
self.iteratorimplementation = iteratorimplementation
def descr_iter(self, space):
return self
def descr_length_hint(self, space):
return space.wrap(self.iteratorimplementation.length())
def descr_reduce(self, space):
"""
This is a slightly special case of pickling.
Since iteration over a dict is a bit hairy,
we do the following:
- create a clone of the dict iterator
- run it to the original position
- collect all remaining elements into a list
At unpickling time, we just use that list
and create an iterator on it.
This is of course not the standard way.
XXX to do: remove this __reduce__ method and do
a registration with copy_reg, instead.
"""
w_mod = space.getbuiltinmodule('_pickle_support')
mod = space.interp_w(MixedModule, w_mod)
new_inst = mod.get('dictiter_surrogate_new')
w_typeobj = space.type(self)
raise oefmt(space.w_TypeError,
"can't pickle dictionary-keyiterator objects")
# XXXXXX get that working again
# we cannot call __init__ since we don't have the original dict
if isinstance(self, W_DictMultiIterKeysObject):
w_clone = space.allocate_instance(W_DictMultiIterKeysObject,
w_typeobj)
elif isinstance(self, W_DictMultiIterValuesObject):
w_clone = space.allocate_instance(W_DictMultiIterValuesObject,
w_typeobj)
elif isinstance(self, W_DictMultiIterItemsObject):
w_clone = space.allocate_instance(W_DictMultiIterItemsObject,
w_typeobj)
else:
raise oefmt(space.w_TypeError,
"unsupported dictiter type '%R' during pickling", self)
w_clone.space = space
w_clone.content = self.content
w_clone.len = self.len
w_clone.pos = 0
w_clone.setup_iterator()
# spool until we have the same pos
while w_clone.pos < self.pos:
w_clone.next_entry()
w_clone.pos += 1
stuff = [w_clone.next_entry() for i in range(w_clone.pos, w_clone.len)]
w_res = space.newlist(stuff)
w_ret = space.newtuple([new_inst, space.newtuple([w_res])])
return w_ret
def _cleanup_(self):
raise Exception("seeing a prebuilt %r object" % (
self.__class__,))
class W_DictMultiIterKeysObject(W_BaseDictMultiIterObject):
def descr_next(self, space):
iteratorimplementation = self.iteratorimplementation
w_key = iteratorimplementation.next_key()
if w_key is not None:
return w_key
raise OperationError(space.w_StopIteration, space.w_None)
class W_DictMultiIterValuesObject(W_BaseDictMultiIterObject):
def descr_next(self, space):
iteratorimplementation = self.iteratorimplementation
w_value = iteratorimplementation.next_value()
if w_value is not None:
return w_value
raise OperationError(space.w_StopIteration, space.w_None)
class W_DictMultiIterItemsObject(W_BaseDictMultiIterObject):
def descr_next(self, space):
iteratorimplementation = self.iteratorimplementation
w_key, w_value = iteratorimplementation.next_item()
if w_key is not None:
return space.newtuple([w_key, w_value])
raise OperationError(space.w_StopIteration, space.w_None)
W_DictMultiIterItemsObject.typedef = TypeDef(
"dict_iteritems",
__iter__ = interp2app(W_DictMultiIterItemsObject.descr_iter),
next = interp2app(W_DictMultiIterItemsObject.descr_next),
__length_hint__ = interp2app(W_BaseDictMultiIterObject.descr_length_hint),
__reduce__ = interp2app(W_BaseDictMultiIterObject.descr_reduce),
)
W_DictMultiIterKeysObject.typedef = TypeDef(
"dict_iterkeys",
__iter__ = interp2app(W_DictMultiIterKeysObject.descr_iter),
next = interp2app(W_DictMultiIterKeysObject.descr_next),
__length_hint__ = interp2app(W_BaseDictMultiIterObject.descr_length_hint),
__reduce__ = interp2app(W_BaseDictMultiIterObject.descr_reduce),
)
W_DictMultiIterValuesObject.typedef = TypeDef(
"dict_itervalues",
__iter__ = interp2app(W_DictMultiIterValuesObject.descr_iter),
next = interp2app(W_DictMultiIterValuesObject.descr_next),
__length_hint__ = interp2app(W_BaseDictMultiIterObject.descr_length_hint),
__reduce__ = interp2app(W_BaseDictMultiIterObject.descr_reduce),
)
# ____________________________________________________________
# Views
class W_DictViewObject(W_Root):
def __init__(self, space, w_dict):
self.w_dict = w_dict
def descr_repr(self, space):
w_seq = space.call_function(space.w_list, self)
w_repr = space.repr(w_seq)
return space.wrap("%s(%s)" % (space.type(self).getname(space),
space.str_w(w_repr)))
def descr_len(self, space):
return space.len(self.w_dict)
def _all_contained_in(space, w_dictview, w_other):
for w_item in space.iteriterable(w_dictview):
if not space.contains_w(w_other, w_item):
return space.w_False
return space.w_True
def _is_set_like(w_other):
from pypy.objspace.std.setobject import W_BaseSetObject
return (isinstance(w_other, W_BaseSetObject) or
isinstance(w_other, W_DictViewKeysObject) or
isinstance(w_other, W_DictViewItemsObject))
class SetLikeDictView(object):
_mixin_ = True
def descr_eq(self, space, w_other):
if not _is_set_like(w_other):
return space.w_NotImplemented
if space.len_w(self) == space.len_w(w_other):
return _all_contained_in(space, self, w_other)
return space.w_False
descr_ne = negate(descr_eq)
def descr_lt(self, space, w_other):
if not _is_set_like(w_other):
return space.w_NotImplemented
if space.len_w(self) < space.len_w(w_other):
return _all_contained_in(space, self, w_other)
return space.w_False
def descr_le(self, space, w_other):
if not _is_set_like(w_other):
return space.w_NotImplemented
if space.len_w(self) <= space.len_w(w_other):
return _all_contained_in(space, self, w_other)
return space.w_False
def descr_gt(self, space, w_other):
if not _is_set_like(w_other):
return space.w_NotImplemented
if space.len_w(self) > space.len_w(w_other):
return _all_contained_in(space, w_other, self)
return space.w_False
def descr_ge(self, space, w_other):
if not _is_set_like(w_other):
return space.w_NotImplemented
if space.len_w(self) >= space.len_w(w_other):
return _all_contained_in(space, w_other, self)
return space.w_False
def _as_set_op(name, methname):
@func_renamer('descr_' + name)
def op(self, space, w_other):
w_set = space.call_function(space.w_set, self)
space.call_method(w_set, methname, w_other)
return w_set
@func_renamer('descr_r' + name)
def rop(self, space, w_other):
w_set = space.call_function(space.w_set, w_other)
space.call_method(w_set, methname, self)
return w_set
return op, rop
descr_sub, descr_rsub = _as_set_op('sub', 'difference_update')
descr_and, descr_rand = _as_set_op('and', 'intersection_update')
descr_or, descr_ror = _as_set_op('or', 'update')
descr_xor, descr_rxor = _as_set_op('xor', 'symmetric_difference_update')
class W_DictViewItemsObject(W_DictViewObject, SetLikeDictView):
def descr_iter(self, space):
return W_DictMultiIterItemsObject(space, self.w_dict.iteritems())
class W_DictViewKeysObject(W_DictViewObject, SetLikeDictView):
def descr_iter(self, space):
return W_DictMultiIterKeysObject(space, self.w_dict.iterkeys())
class W_DictViewValuesObject(W_DictViewObject):
def descr_iter(self, space):
return W_DictMultiIterValuesObject(space, self.w_dict.itervalues())
W_DictViewItemsObject.typedef = TypeDef(
"dict_items",
__repr__ = interp2app(W_DictViewItemsObject.descr_repr),
__len__ = interp2app(W_DictViewItemsObject.descr_len),
__iter__ = interp2app(W_DictViewItemsObject.descr_iter),
__eq__ = interp2app(W_DictViewItemsObject.descr_eq),
__ne__ = interp2app(W_DictViewItemsObject.descr_ne),
__lt__ = interp2app(W_DictViewItemsObject.descr_lt),
__le__ = interp2app(W_DictViewItemsObject.descr_le),
__gt__ = interp2app(W_DictViewItemsObject.descr_gt),
__ge__ = interp2app(W_DictViewItemsObject.descr_ge),
__sub__ = interp2app(W_DictViewItemsObject.descr_sub),
__rsub__ = interp2app(W_DictViewItemsObject.descr_rsub),
__and__ = interp2app(W_DictViewItemsObject.descr_and),
__rand__ = interp2app(W_DictViewItemsObject.descr_rand),
__or__ = interp2app(W_DictViewItemsObject.descr_or),
__ror__ = interp2app(W_DictViewItemsObject.descr_ror),
__xor__ = interp2app(W_DictViewItemsObject.descr_xor),
__rxor__ = interp2app(W_DictViewItemsObject.descr_rxor),
)
W_DictViewKeysObject.typedef = TypeDef(
"dict_keys",
__repr__ = interp2app(W_DictViewKeysObject.descr_repr),
__len__ = interp2app(W_DictViewKeysObject.descr_len),
__iter__ = interp2app(W_DictViewKeysObject.descr_iter),
__eq__ = interp2app(W_DictViewKeysObject.descr_eq),
__ne__ = interp2app(W_DictViewKeysObject.descr_ne),
__lt__ = interp2app(W_DictViewKeysObject.descr_lt),
__le__ = interp2app(W_DictViewKeysObject.descr_le),
__gt__ = interp2app(W_DictViewKeysObject.descr_gt),
__ge__ = interp2app(W_DictViewKeysObject.descr_ge),
__sub__ = interp2app(W_DictViewKeysObject.descr_sub),
__rsub__ = interp2app(W_DictViewKeysObject.descr_rsub),
__and__ = interp2app(W_DictViewKeysObject.descr_and),
__rand__ = interp2app(W_DictViewKeysObject.descr_rand),
__or__ = interp2app(W_DictViewKeysObject.descr_or),
__ror__ = interp2app(W_DictViewKeysObject.descr_ror),
__xor__ = interp2app(W_DictViewKeysObject.descr_xor),
__rxor__ = interp2app(W_DictViewKeysObject.descr_rxor),
)
W_DictViewValuesObject.typedef = TypeDef(
"dict_values",
__repr__ = interp2app(W_DictViewValuesObject.descr_repr),
__len__ = interp2app(W_DictViewValuesObject.descr_len),
__iter__ = interp2app(W_DictViewValuesObject.descr_iter),
)
|
the-stack_106_29422 | def patternMatch(sentences):
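    """Extract shared five-token patterns anchored on each distinct word.
    For every word that appears in any sentence, look at that word's position and
    the four positions after it in each sentence containing it; slots where all
    such sentences agree keep the common word, the rest become "_". Returns one
    five-token pattern per distinct word.
    """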
deconstructedSentences = []
totalWords = []
for sentence in sentences:
words = sentence.split()
deconstructedSentences.append(words)
totalWords.extend(words)
allWords = set(totalWords)
pattern = []
patterns = []
for word in allWords:
        # Check the word's own position and the four positions after it.
        for offset in range(5):
            sentenceWords = []
            for sentence in deconstructedSentences:
                if word in sentence:
                    wordPosition = sentence.index(word) + offset
                    if wordPosition < len(sentence):
                        sentenceWords.append(sentence[wordPosition])
            # Keep the token only if every sentence containing the anchor word
            # agrees at this offset; otherwise mark the slot as a wildcard.
            if len(set(sentenceWords)) == 1:
                pattern.append(sentenceWords[0])
            else:
                pattern.append("_")
patterns.append(list(pattern))
pattern = []
return patterns
print(patternMatch(["you suck so much bro", "you moron so much bro", "you slappo so much bro"])) |
the-stack_106_29426 | import os
import subprocess
import json
from voter import create_toxic_result, create_binary_result
from argparse import ArgumentParser
def list_configs(path):
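    """Collect every training config (a *.json file with 'train' in its name) from `path`."""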
files = []
if not os.path.isdir(path):
raise Exception(f"The path given is not valid. Path: {path}")
for config_file in os.listdir(path):
if config_file.endswith('.json') and 'train' in config_file:
files.append(os.path.join(path, config_file))
print(files)
return files
def make_multilingual(files):
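    """For each training config, write a copy that uses bert-base-multilingual-cased
    (adding a '_multilingual' suffix to the model path and file name) and return the
    original plus the new config paths."""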
all_files = files.copy()
for file_ in files:
path = '.'.join(file_.split('.')[:-1])
extension = file_.split('.')[-1]
config = json.load(open(file_))
new_config = config.copy()
if new_config['bert_model'] != 'bert-base-multilingual-cased':
new_config['bert_model'] = 'bert-base-multilingual-cased'
if '_whole_data' in path:
new_path = path[:-len('_whole_data')] + '_multilingual_whole_data'
new_config['model_path'] = new_config['model_path'][:-len('_whole_data')] + '_multilingual_whole_data'
else:
new_path = path + '_multilingual'
new_config['model_path'] = new_config['model_path'] + '_multilingual'
new_path = '.'.join([new_path, extension])
with open(new_path, 'w') as multi_path:
json.dump(new_config, multi_path, indent=4)
all_files.append(new_path)
return all_files
def make_new_configs(best_epoch, config, test_file_path, test, train_config, cat_name, ext, test_dir):
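    """Write one prediction config per best-epoch metric into `test_dir`, pointing the
    saved whole-data model of that epoch at `test_file_path` and setting the data type
    and prediction path appropriate for the given test set."""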
for epoch, num in best_epoch.items():
new_config = config.copy()
new_config['data_path'] = [test_file_path, test_file_path]
new_config['model_path'] = new_config['model_path'] + f'_whole_data_epoch{num[1]}'
new_config['train_test'] = 'predict'
new_config['predict_path'] = f"predicted/{test}_{train_config}_{cat_name}_{epoch}.csv"
# new_config['mode'] = 'binary'
# if '{}' in new_config['model_path']:
new_config['model_path'] = new_config['model_path'].format(cat_name) # 'TOXIC')
if test == 'GermEval21':
new_config['predict_path'] = f"predicted/NLE/{test}_{train_config}_{{}}_{epoch}.csv"
new_config['mode'] = 'binary_categories'
new_config['data_type'] = 'facebook'
elif 'HASOC' in test:
new_config['data_type'] = 'hasoc'
else:
new_config['data_type'] = 'twitter'
filename = train_config.replace('train', 'test') + f"_{epoch}.{ext}"
if cat_name in new_config['model_path']:
filename = train_config.replace('train', 'test') + f"_{cat_name}_{epoch}.{ext}"
with open(os.path.join(test_dir, filename), 'w') as test_config:
json.dump(new_config, test_config, indent=4)
def run_configs(files, test_path):
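    """Train every config via train_model.py, parse the per-epoch classification reports
    from stdout to find the best epochs (macro F1, toxic F1, toxic precision, per category
    where applicable), generate prediction configs for each test set in `test_path`, and
    then run those prediction configs."""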
with open(test_path) as tp:
data_files = json.load(tp)
base_dir = os.path.dirname(files[0])
for file_ in files:
print(file_)
output = subprocess.run(['python3', 'train_model.py', '--config', file_],
stdout=subprocess.PIPE)
print(output)
train_config = '.'.join(os.path.basename(file_).split('.')[:-1])
ext = file_.split('.')[-1]
config = json.load(open(file_))
if 'whole_data' in file_:
continue
best_epoch = {'macro_F1': [0, 0], 'toxic_F1': [0, 0], 'toxic_precision': [0, 0]}
best_epochs = {'ABUSE': {'macro_F1': [0, 0], 'toxic_F1': [0, 0], 'toxic_precision': [0, 0]},
'PROFANITY': {'macro_F1': [0, 0], 'toxic_F1': [0, 0], 'toxic_precision': [0, 0]},
'INSULT': {'macro_F1': [0, 0], 'toxic_F1': [0, 0], 'toxic_precision': [0, 0]}}
epochs = output.stdout.decode("utf-8").replace('\'', '\"').split("\n")
for i, epoch in enumerate(epochs):
if not epoch.startswith('{'):
continue
epoch_data = json.loads(epoch)
cat_name = 'OFFENSE' if 'OFFENSE' in epoch_data else \
('TOXIC' if 'TOXIC' in epoch_data else ('HOF' if 'HOF' in epoch_data else None))
if cat_name is None:
cat_name = 'ABUSE' if 'ABUSE' in epoch_data else \
('PROFANITY' if 'PROFANITY' in epoch_data else ('INSULT' if 'INSULT' in epoch_data else None))
if cat_name is None:
continue
if cat_name in best_epochs:
if epoch_data['macro avg']['f1-score'] > best_epochs[cat_name]['macro_F1'][0]:
best_epochs[cat_name]['macro_F1'][0] = epoch_data['macro avg']['f1-score']
best_epochs[cat_name]['macro_F1'][1] = i % 10
if epoch_data[cat_name]['f1-score'] > best_epochs[cat_name]['toxic_F1'][0]:
best_epochs[cat_name]['toxic_F1'][0] = epoch_data[cat_name]['f1-score']
best_epochs[cat_name]['toxic_F1'][1] = i % 10
if epoch_data[cat_name]['precision'] > best_epochs[cat_name]['toxic_precision'][0]:
best_epochs[cat_name]['toxic_precision'][0] = epoch_data[cat_name]['precision']
best_epochs[cat_name]['toxic_precision'][1] = i % 10
else:
if epoch_data['macro avg']['f1-score'] > best_epoch['macro_F1'][0]:
best_epoch['macro_F1'][0] = epoch_data['macro avg']['f1-score']
best_epoch['macro_F1'][1] = i
if epoch_data[cat_name]['f1-score'] > best_epoch['toxic_F1'][0]:
best_epoch['toxic_F1'][0] = epoch_data[cat_name]['f1-score']
best_epoch['toxic_F1'][1] = i
if epoch_data[cat_name]['precision'] > best_epoch['toxic_precision'][0]:
best_epoch['toxic_precision'][0] = epoch_data[cat_name]['precision']
best_epoch['toxic_precision'][1] = i
for test, test_file_path in data_files.items():
test_dir = os.path.join(base_dir, test)
if not os.path.exists(test_dir):
os.mkdir(test_dir)
if cat_name not in best_epochs:
make_new_configs(best_epoch, config, test_file_path, test, train_config, cat_name, ext, test_dir)
else:
for (cat_name, best_epoch) in best_epochs.items():
make_new_configs(best_epoch, config, test_file_path, test, train_config, cat_name, ext, test_dir)
for test in data_files:
test_dir = os.path.join(base_dir, test)
for conf in os.listdir(test_dir):
conf_path = os.path.join(test_dir, conf)
print(conf_path)
subprocess.run(['python3', 'train_model.py', '--config', conf_path])
def vote(predicted_path, test_path):
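    """Build final results for every prediction file via the voter helpers (toxic or
    binary depending on the test set), collect precision/recall/F1 for the offensive and
    non-offensive classes plus the macro average, and write LaTeX-formatted rows to
    latex_results.txt."""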
with open(test_path) as tp:
data_files = json.load(tp)
results = []
for pred in os.listdir(predicted_path):
pred_path = os.path.join(predicted_path, pred)
print(pred)
if pred.split('_')[0] == 'GermEval21':
output = create_toxic_result(pred_path, data_files['GermEval21'],
os.path.join(predicted_path, 'result', pred), 'toxic')
elif pred.startswith('HASOC'):
output = create_binary_result(pred_path, data_files['_'.join([pred.split('_')[0], pred.split('_')[1]])],
os.path.join(predicted_path, 'result', pred), hasoc=True)
elif pred.split('_')[0] in data_files:
output = create_binary_result(pred_path, data_files[pred.split('_')[0]],
os.path.join(predicted_path, 'result', pred), hasoc=False)
else:
continue
off_cat = '1' if '1' in output else ('HOF' if 'HOF' in output else ('OFFENSE' if 'OFFENSE' in output else None))
if off_cat is None:
off_cat = "OFFENSE"
output["OFFENSE"] = {'precision': 0, 'recall': 0, 'f1-score': 0}
no_cat = '0' if '0' in output else ('NOT' if 'NOT' in output else ('OTHER' if 'OTHER' in output else None))
if no_cat is None:
no_cat = "OTHER"
output["OTHER"] = {'precision': 0, 'recall': 0, 'f1-score': 0}
results.append(f'&{pred}\n'
f'&{round(output[off_cat]["precision"]*100, 1)}'
f'&{round(output[off_cat]["recall"]*100, 1)}'
f'&{round(output[off_cat]["f1-score"]*100, 1)}\n'
f'&{round(output[no_cat]["precision"]*100, 1)}'
f'&{round(output[no_cat]["recall"]*100, 1)}'
f'&{round(output[no_cat]["f1-score"]*100, 1)}\n'
f'&{round(output["macro avg"]["precision"]*100, 1)}'
f'&{round(output["macro avg"]["recall"]*100, 1)}'
f'&{round(output["macro avg"]["f1-score"]*100, 1)}\n\n')
with open("latex_results.txt", 'w') as latex:
latex.write('\n'.join(sorted(results)))
if __name__ == '__main__':
arg_parser = ArgumentParser()
arg_parser.add_argument('--mode', choices=['train', 'result'], default='train',
help='Train models (train) and run the best of them on the test files, '
'or generate predictions (result) using the output files of the train mode.')
arg_parser.add_argument('--configs', default='configs', help='The path to the training config files.')
arg_parser.add_argument('--multilingual', action='store_true', help='Generate multilingual train configs.')
arg_parser.add_argument('--test_path', help='Json file containing the test files.', default='test.json')
args = arg_parser.parse_args()
if args.mode == 'train':
train_configs = list_configs(args.configs)
if args.multilingual:
train_configs = make_multilingual(train_configs)
run_configs(train_configs, args.test_path)
else:
vote(args.configs, args.test_path)
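# Example invocation (illustrative only; the script name and paths below are assumptions,
# not part of the repository):
#   python3 run_experiments.py --mode train --configs configs --test_path test.json
#   python3 run_experiments.py --mode result --configs predicted --test_path test.json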
|
the-stack_106_29428 | """
Facebook's Prophet
Since Prophet install can be finicky on Windows, it will be an optional dependency.
"""
import datetime
import numpy as np
import pandas as pd
from autots.models.base import ModelObject, PredictionObject
import logging
# https://stackoverflow.com/questions/27361427/how-to-properly-deal-with-optional-features-in-python
try:
try: # no idea when they switched
from fbprophet import Prophet
except Exception:
from prophet import Prophet
except Exception: # except ImportError
_has_prophet = False
else:
_has_prophet = True
def seek_the_oracle(current_series, args, series, forecast_length, future_regressor):
"""Prophet for for loop or parallel."""
current_series = current_series.rename(columns={series: 'y'})
current_series['ds'] = current_series.index
m = Prophet(interval_width=args['prediction_interval'])
if args['holiday']:
m.add_country_holidays(country_name=args['holiday_country'])
if args['regression_type'] == 'User':
current_series = pd.concat([current_series, args['regressor_train']], axis=1)
for nme in args['regressor_name']:
m.add_regressor(nme)
m = m.fit(current_series)
future = m.make_future_dataframe(periods=forecast_length)
if args['regression_type'] == 'User':
if future_regressor.ndim > 1:
# a = args['dimensionality_reducer'].transform(future_regressor)
if future_regressor.shape[1] > 1:
ft_regr = (
future_regressor.mean(axis=1)
.to_frame()
.merge(
future_regressor.std(axis=1).to_frame(),
left_index=True,
right_index=True,
)
)
else:
ft_regr = future_regressor.copy()
ft_regr.columns = args['regressor_train'].columns
regr = pd.concat([args['regressor_train'], ft_regr])
regr.index.name = 'ds'
regr.reset_index(drop=False, inplace=True)
future = future.merge(regr, on="ds", how='left')
else:
a = np.append(args['regressor_train'], future_regressor.values)
future[args['regressor_name']] = a
fcst = m.predict(future)
fcst = fcst.tail(forecast_length) # remove the backcast
forecast = fcst['yhat']
forecast.name = series
lower_forecast = fcst['yhat_lower']
lower_forecast.name = series
upper_forecast = fcst['yhat_upper']
upper_forecast.name = series
return (forecast, lower_forecast, upper_forecast)
class FBProphet(ModelObject):
"""Facebook's Prophet
'thou shall count to 3, no more, no less, 3 shall be the number thou shall count, and the number of the counting
shall be 3. 4 thou shall not count, neither count thou 2, excepting that thou then preceed to 3.' -Python
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
holiday (bool): If true, include holidays
regression_type (str): type of regression (None, 'User')
"""
def __init__(
self,
name: str = "FBProphet",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday: bool = False,
regression_type: str = None,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
regression_type=regression_type,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
self.holiday = holiday
self.regressor_name = []
def fit(self, df, future_regressor=[]):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
if not _has_prophet:
raise ImportError("Package prophet is required")
df = self.basic_profile(df)
self.regressor_train = None
self.dimensionality_reducer = None
if self.regression_type == 'User':
if future_regressor.shape[0] != df.shape[0]:
self.regression_type = None
else:
if future_regressor.ndim > 1:
if future_regressor.shape[1] > 1:
                        # Summarize a multi-column regressor by its per-row mean and std,
                        # mirroring how predict() reduces future_regressor.
                        regr = pd.concat(
                            [future_regressor.mean(axis=1).to_frame(),
                             future_regressor.std(axis=1).to_frame()],
                            axis=1,
                        )
regr.columns = [0, 1]
else:
regr = future_regressor
regr.columns = [
str(colr) if colr not in df.columns else str(colr) + "xxxxx"
for colr in regr.columns
]
self.regressor_train = regr
self.regressor_name = regr.columns.tolist()
"""
from sklearn.decomposition import PCA
self.dimensionality_reducer = PCA(n_components=1).fit(
future_regressor
)
self.regressor_train = self.dimensionality_reducer.transform(
future_regressor
)
"""
else:
self.regressor_train = future_regressor.copy()
# this is a hack to utilize regressors with a name unlikely to exist
random_two = "n9032380gflljWfu8233koWQop3"
random_one = "prophet_staging_regressor"
self.regressor_name = [
random_one if random_one not in df.columns else random_two
]
self.df_train = df
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self,
forecast_length: int,
future_regressor=[],
just_point_forecast: bool = False,
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
            future_regressor (pandas.DataFrame or numpy.Array): additional regressor,
                used when regression_type == 'User'
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
if not _has_prophet:
raise ImportError("Package prophet is required")
predictStartTime = datetime.datetime.now()
# if self.regression_type != None:
# assert len(future_regressor) == forecast_length, "regressor not equal to forecast length"
test_index = self.create_forecast_index(forecast_length=forecast_length)
if self.verbose <= 0:
logging.getLogger('fbprophet').setLevel(logging.WARNING)
args = {
'holiday': self.holiday,
'holiday_country': self.holiday_country,
'regression_type': self.regression_type,
'regressor_name': self.regressor_name,
'regressor_train': self.regressor_train,
'dimensionality_reducer': self.dimensionality_reducer,
'prediction_interval': self.prediction_interval,
}
parallel = True
cols = self.df_train.columns.tolist()
if self.n_jobs in [0, 1] or len(cols) < 4:
parallel = False
else:
try:
from joblib import Parallel, delayed
except Exception:
parallel = False
# joblib multiprocessing to loop through series
if parallel:
verbs = 0 if self.verbose < 1 else self.verbose - 1
df_list = Parallel(n_jobs=self.n_jobs, verbose=(verbs))(
delayed(seek_the_oracle)(
current_series=self.df_train[col].to_frame(),
args=args,
series=col,
forecast_length=forecast_length,
future_regressor=future_regressor,
)
for col in cols
)
else:
df_list = []
for col in cols:
df_list.append(
seek_the_oracle(
self.df_train[col].to_frame(),
args,
col,
forecast_length=forecast_length,
future_regressor=future_regressor,
)
)
complete = list(map(list, zip(*df_list)))
forecast = pd.concat(complete[0], axis=1)
forecast.index = test_index
forecast = forecast[self.column_names]
lower_forecast = pd.concat(complete[1], axis=1)
lower_forecast.index = test_index
lower_forecast = lower_forecast[self.column_names]
upper_forecast = pd.concat(complete[2], axis=1)
upper_forecast.index = test_index
upper_forecast = upper_forecast[self.column_names]
if just_point_forecast:
return forecast
else:
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=forecast.index,
forecast_columns=forecast.columns,
lower_forecast=lower_forecast,
forecast=forecast,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
holiday_choice = np.random.choice(a=[True, False], size=1, p=[0.5, 0.5]).item()
regression_list = [None, 'User']
regression_probability = [0.8, 0.2]
regression_choice = np.random.choice(
a=regression_list, size=1, p=regression_probability
).item()
parameter_dict = {
'holiday': holiday_choice,
'regression_type': regression_choice,
}
return parameter_dict
def get_params(self):
"""Return dict of current parameters."""
parameter_dict = {
'holiday': self.holiday,
'regression_type': self.regression_type,
}
return parameter_dict
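# Minimal usage sketch (illustrative only; assumes the optional `prophet` dependency is
# installed and `wide_df` is a datetime-indexed wide DataFrame of series to forecast):
#   model = FBProphet(frequency='infer', prediction_interval=0.9)
#   model.fit(wide_df)
#   prediction = model.predict(forecast_length=14)
#   point_forecasts = prediction.forecast  # DataFrame of point forecasts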
|
the-stack_106_29431 | # -*- coding: utf-8 -*-
'''
Project: Product Aesthetic Design: A Machine Learning Augmentation
Authors: Alex Burnap, Yale University
Email: [email protected]
License: MIT License
OSS Code Attribution (see Licensing Inheritance):
Portions of Code From or Modified from Open Source Projects:
https://github.com/tkarras/progressive_growing_of_gans
https://github.com/AaltoVision/pioneer
https://github.com/DmitryUlyanov/AGE
https://github.com/akanimax/attn_gan_pytorch/
'''
import h5py
import numpy as np
import torch
import config
c = config.c
class Chairs(torch.utils.data.Dataset):
"""
This is the chair dataset for the open source / open data code release.
It is different than the car dataset (primary dataset) in the paper due to
data mapping, such that this code may not be as efficient as possible for the
chair dataset.
The dataset is built on wrapping around the Torch Dataset object as well as HDF5
for the underlying dataformat. This is a very fast data format and supports both
loading into RAM or directly off disk.
Make sure your HDF5 installation is updated to support SWMR mode for parallel
access, as most default OS packages are older than this support.
"""
def __init__(self,
use_RAM,
train_x=None,
train_y=None,
valid_x=None,
valid_y=None,
test_x=None,
test_y=None,
c=None):
self.use_RAM = use_RAM
if train_x is not None:
self.train_x = train_x
self.train_y = train_y
self.valid_x = valid_x
self.valid_y = valid_y
self.test_x = test_x
self.test_y = test_y
assert c is not None
self.c = c
resolution = ['IMG_8', 'IMG_16', 'IMG_32', 'IMG_64', 'IMG_128', 'IMG_256', 'IMG_512']
self._base_key = 'IMG_'
self._base_masks_key = 'IMG_'
if self.use_RAM:
print('Loading Images into RAM...')
self.dataset = h5py.File(self.c.images_dir, 'r', driver='core')
if self.c.use_masks:
self.masks = h5py.File(self.c.masks_dir, 'r', driver='core')
print('Done loading Images into RAM...')
else:
self.dataset = h5py.File(self.c.images_dir, 'r')
if self.c.use_masks:
self.masks = h5py.File(self.c.masks_dir, 'r')
self.chair_full_inds = np.loadtxt(self.c.dining_room_chair_full_inds_dir, dtype=int)
print('{} chairs in the dataset'.format(self.chair_full_inds.shape[0]))
self.chair_labeled_inds = np.loadtxt(self.c.dining_room_chair_labeled_inds_dir, dtype=int)
self._len = {k: len(self.dataset[k]) for k in resolution}
# self.num_data = self._len['data8x8']
self.num_data = self.chair_full_inds.shape[0]
assert all([resol in self.dataset.keys() for resol in resolution])
# Training image inds
# image_ids outside train, valid, test - take % of that
# then concat back with train_image_inds as training_image_inds
# Note: For chairs there is a different mapping scheme than vehicles dataset, so this code is unnecessarily complex
# self.design_ids_of_images = design_ids
self.design_ids_of_images = self.chair_full_inds
train_design_ids, valid_design_ids, test_design_ids = np.unique(self.train_x), np.unique(self.valid_x), np.unique(self.test_x)
# train_bool_array = np.isin(self.design_ids_of_images, train_design_ids)
# self.training_image_inds = np.nonzero(train_bool_array)[0]
# num_training_inds = len(self.training_image_inds)
# labeled_design_ids = np.concatenate((train_design_ids, valid_design_ids, test_design_ids))
unlabeled_bool_array = ~np.isin(self.chair_full_inds, self.chair_labeled_inds)
self.unlabeled_image_inds = np.nonzero(unlabeled_bool_array)[0]
if self.c.percentage_of_unlabeled_data != 1.0: # 0.5 # 1.0, 0.5, 0.25; 1.0 is full training set
num_unlabeled = int(self.c.percentage_of_unlabeled_data * len(self.unlabeled_image_inds))
self.unlabeled_image_inds = np.random.choice(self.unlabeled_image_inds, size=num_unlabeled, replace=False)
self.training_image_inds = np.concatenate((train_design_ids, self.unlabeled_image_inds))
self.training_mean = np.mean(np.abs(self.valid_y - self.train_y.mean()))
self.training_median = np.mean(np.abs(self.valid_y - np.median(self.train_y)))
self.training_mid = np.mean(np.abs(self.valid_y - 3.0))
print("\nTotal Unlabeled Data at {}%: {}".format(int(self.c.percentage_of_unlabeled_data*100),
( len(self.unlabeled_image_inds)) * self.c.number_viewpoints_per_product))
# Sideview image ids
self.random_image_idxs = np.arange(self.c.number_viewpoints_per_product)
# self.valid_x_raveled = np.array([[ind, view] for ind in self.valid_x for view in np.arange(self.c.number_viewpoints_per_product)], dtype=np.uint8)
self.valid_x_repeated = self.valid_x.repeat(self.c.number_viewpoints_per_product) # np.array([[ind, view] for ind in self.valid_x for view in np.arange(self.c.number_viewpoints_per_product)], dtype=np.uint8)
self.valid_y_repeated = self.valid_y.repeat(self.c.number_viewpoints_per_product)
self.test_x_repeated = self.test_x.repeat(self.c.number_viewpoints_per_product)
self.test_y_repeated = self.test_y.repeat(self.c.number_viewpoints_per_product)
# self.random_image_idxs = np.array([0, 1, 2, 3, 15, 16, 17, 18, 19, 20, 21, 33, 34, 35])
self.random_image_idxs = np.array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
def get_random_image_ids_given_design_ids(self, design_ids):
random_views = np.random.choice(self.random_image_idxs+self.c.rated_viewpoint, size=len(design_ids), replace=True)
# image_ids = np.array([
# np.where(self.design_ids_of_images == elm)[0][random_views] for elm in design_ids])
image_ids = np.array([design_ids, random_views]).swapaxes(0,1)
return image_ids
# return image_ids.flatten()
# def get_side_image_id_given_design_ids(self, design_ids):
# image_ids = np.array([
# np.where(self.design_ids_of_images == elm)[0][0] for elm in design_ids])
# return image_ids.flatten()
def get_side_image_id_given_design_ids(self, design_ids):
image_ids = np.array([design_ids, np.ones(design_ids.shape[0], dtype=int)*self.c.rated_viewpoint])
image_ids = image_ids.swapaxes(0, 1)
return image_ids
def __call__(self,
batch_size,
phase,
alpha,
with_masks=True,
# with_masks=False,
with_ratings=False,
data_split='train',
batch_ind=None,
side_view=False):
size = 4 * (2 ** phase)
key = self._base_key + '{}'.format(size)
if not with_ratings:
# Can't do fast lookup anymore, need to do slow functions using code from ratings below
# image_ids = np.random.choice(self.training_image_inds, size=batch_size, replace=True)
if batch_ind is None:
# inds = np.random.randint(self.train_x.shape[0], size=batch_size)
# This is for training rated data + unlabelled data
design_ids = np.random.choice(self.training_image_inds, size=batch_size, replace=True)
else:
# Null case here
# inds = np.arange(self.train_x.shape[0])[batch_ind * batch_size: (batch_ind + 1) * batch_size]
# design_ids = self.train_x[inds]
raise Exception("got to null case for chair design")
if not side_view:
image_ids = self.get_random_image_ids_given_design_ids(design_ids)
else:
image_ids = self.get_side_image_id_given_design_ids(design_ids)
else:
if data_split == 'train':
if batch_ind is None:
inds = np.random.randint(self.train_x.shape[0], size=batch_size)
else:
inds = np.arange(self.train_x.shape[0])[batch_ind * batch_size: (batch_ind + 1) * batch_size]
design_ids = self.train_x[inds]
batch_ratings = self.train_y[inds].astype(np.float32)
elif data_split == 'valid':
if batch_ind is None:
inds = np.random.randint(self.valid_x.shape[0], size=batch_size)
else:
inds = np.arange(self.valid_x.shape[0])[batch_ind * batch_size: (batch_ind + 1) * batch_size]
design_ids = self.valid_x[inds]
batch_ratings = self.valid_y[inds].astype(np.float32)
elif data_split == 'test':
if batch_ind is None:
inds = np.random.randint(self.test_x.shape[0], size=batch_size)
else:
inds = np.arange(self.test_x.shape[0])[batch_ind * batch_size: (batch_ind + 1) * batch_size]
design_ids = self.test_x[inds]
batch_ratings = self.test_y[inds].astype(np.float32)
batch_ratings = torch.from_numpy(batch_ratings)
batch_ratings = batch_ratings.reshape(-1, 1)
if not side_view:
image_ids = self.get_random_image_ids_given_design_ids(design_ids)
else:
image_ids = self.get_side_image_id_given_design_ids(design_ids)
# print(image_ids)
hi_res_batch_images = np.array([self.dataset[key][i[0]][i[1]] / 127.5 - 1.0 for i in image_ids], dtype=np.float32)
hi_res_batch_images = hi_res_batch_images.transpose(0, 3, 1, 2) # because this is in RGB vs BGR
batch_attributes = np.array([self.dataset["info"][i[0]][i[1]] for i in image_ids], dtype=np.float32)
if with_masks:
key = self._base_masks_key + '{}'.format(size)
# batch_masks = np.array([self.masks[key][i][:,:,0] / 255.0 for i in image_ids], dtype=np.float32)
# batch_masks = np.array([self.masks[key][i[0]][i[1]][:, :, 0] / 255.0 for i in image_ids], dtype=np.float32)
batch_masks = np.array([self.masks[key][i[0]][i[1]][:, :, 0] for i in image_ids], dtype=np.float32)
batch_masks = np.expand_dims(batch_masks, 3).transpose(0, 3, 1, 2)
batch_masks = torch.from_numpy(batch_masks)
if alpha < 1.0 and phase > 0:
lr_key = self._base_key + '{}'.format(size // 2)
low_res_batch_images = np.array([self.dataset[lr_key][i[0]][i[1]] / 127.5 - 1.0 for i in image_ids],
dtype=np.float32).transpose(0, 3, 1, 2).repeat(2, axis=2).repeat(2, axis=3)
# low_res_batch_images = np.array([self.dataset[lr_key][i] / 127.5 - 1.0 for i in image_ids],
# dtype=np.float32).repeat(2, axis=2).repeat(2, axis=3)
# low_res_batch_images = low_res_batch_images # because this is in RGB vs BGR
batch_images = hi_res_batch_images * alpha + low_res_batch_images * (1.0 - alpha)
else:
batch_images = hi_res_batch_images
batch_images = torch.from_numpy(batch_images)
batch_attributes = torch.from_numpy(batch_attributes)
if not with_ratings:
if self.c.conditional_model:
if with_masks:
return batch_images, batch_attributes, batch_masks
else:
return batch_images, batch_attributes
else:
if self.c.use_masks:
return batch_images, batch_masks
else:
return batch_images
else:
if self.c.conditional_model:
if with_masks:
return batch_images, batch_attributes, batch_masks, batch_ratings
else:
return batch_images, batch_attributes, batch_ratings
else:
if with_masks:
return batch_images, batch_masks, batch_ratings
else:
return batch_images, batch_ratings
def __len__(self):
return self.num_data
def __getitem__(self, index, size=512, with_attributes=False, astorch=False):
# size = 4 * (2 ** phase)
key = self._base_key + '{}'.format(size)
image = self.dataset[key][index] / 127.5 - 1.0
attributes = self.attributes[index]
image = image.astype(np.float32)
attributes = attributes.astype(np.float32)
if astorch:
image, attributes = torch.from_numpy(image), torch.from_numpy(attributes)
if with_attributes:
return image, attributes
else:
return image
# TODO Refactor this
class DataGenerator(object):
def __init__(self):
pass
@staticmethod
def data_generator_phase0(dataloader, batch_size, image_size=4):
while True: # This is an infinite iterator
if c.conditional_model:
if c.use_masks:
batch_images, batch_attributes, batch_masks = dataloader(batch_size, int(np.log2(image_size / 4)), 0.0)
# yield torch.from_numpy(batch_images), torch.from_numpy(batch_attributes), torch.from_numpy(batch_masks)
yield batch_images, batch_attributes, batch_masks
else:
batch_images, batch_attributes = dataloader(batch_size, int(np.log2(image_size / 4)), 0.0)
yield batch_images, batch_attributes
else:
batch_images = dataloader(batch_size, int(np.log2(image_size / 4)), 0.0)
yield batch_images, None # no label
@staticmethod
def data_generator_session(dataloader, batch_size, image_size, session):
""" This is another version of sample data that instead uses session information
"""
while True: # This is an infinite iterator
if c.conditional_model:
if c.use_masks:
batch_images, batch_attributes, batch_masks = dataloader(batch_size, session.phase, session.alpha)
yield batch_images, batch_attributes, batch_masks
else:
batch_images, batch_attributes = dataloader(batch_size, session.phase, session.alpha)
yield batch_images, batch_attributes
else:
batch = dataloader(batch_size, session.phase, session.alpha)
yield batch, None # no label
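# A minimal usage sketch (hypothetical names; assumes a config module `c` whose
# `images_dir`, `masks_dir`, and index files point at valid data, with
# `c.conditional_model` and `c.use_masks` enabled so three tensors come back):
#
#     dataset = Chairs(use_RAM=False,
#                      train_x=train_x, train_y=train_y,
#                      valid_x=valid_x, valid_y=valid_y,
#                      test_x=test_x, test_y=test_y, c=c)
#     # phase=2 -> 4 * 2**2 = 16x16 images; alpha=1.0 -> no blending with lower resolution
#     imgs, attrs, masks = dataset(batch_size=16, phase=2, alpha=1.0)
#     # or wrap it in the infinite generator above for a training loop:
#     gen = DataGenerator.data_generator_phase0(dataset, batch_size=16, image_size=16)
#     imgs, attrs, masks = next(gen)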
|
the-stack_106_29434 | #!/usr/bin/env python3
import os
import sys
import subprocess
import argparse
def main():
parser = argparse.ArgumentParser(description='GATK SplitIntervals')
parser.add_argument('-j', '--jvm-mem', dest='jvm_mem', type=int,
help='JVM max memory in MB', default=1000)
parser.add_argument('-R', dest='ref_fa', type=str,
help='Reference genome file (eg, .fa)', required=True)
parser.add_argument('-L', dest='intervals', type=str,
help='Optional interval file with one or more genomic intervals over which to operate')
parser.add_argument('--scatter', dest='scatter_count', type=int,
help='Number of output interval files to split into', required=True)
args = parser.parse_args()
cmd = 'gatk --java-options "-Xmx%sm" SplitIntervals -R %s -O %s -scatter %s' % (
args.jvm_mem, args.ref_fa, os.getcwd(), args.scatter_count
)
if args.intervals:
cmd = cmd + ' -L %s' % args.intervals
try:
p = subprocess.run([cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
print(p.stdout.decode("utf-8"))
if p.returncode != 0:
print('Error occurred: %s' % p.stderr.decode("utf-8"), file=sys.stderr)
sys.exit(p.returncode)
except Exception as e:
sys.exit('Execution failed: %s' % e)
if __name__ == "__main__":
main()
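# Example invocation (file names are illustrative, not part of the original script):
#
#   python split_intervals.py -j 4000 -R ref.fa -L targets.interval_list --scatter 10
#
# which, per the command template above, shells out to roughly:
#
#   gatk --java-options "-Xmx4000m" SplitIntervals -R ref.fa -O <current working dir> \
#        -scatter 10 -L targets.interval_list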
|
the-stack_106_29435 | """engine.SCons.Tool.f03
Tool-specific initialization for the generic Posix f03 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/f03.py 2014/08/24 12:12:31 garyo"
import SCons.Defaults
import SCons.Tool
import SCons.Util
import fortran
from SCons.Tool.FortranCommon import add_all_to_env, add_f03_to_env
compilers = ['f03']
def generate(env):
add_all_to_env(env)
add_f03_to_env(env)
fcomp = env.Detect(compilers) or 'f03'
env['F03'] = fcomp
env['SHF03'] = fcomp
env['FORTRAN'] = fcomp
env['SHFORTRAN'] = fcomp
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_106_29436 | # -*- coding: utf-8 -*-
"""
Utilities
=========
Miscellaneous utilities.
"""
# Author: Eric Larson
# License: 3-clause BSD
from __future__ import division, absolute_import, print_function
import hashlib
import os
from shutil import move, copyfile
def scale_image(in_fname, out_fname, max_width, max_height):
"""Scales an image with the same aspect ratio centered in an
image box with the given max_width and max_height
if in_fname == out_fname the image can only be scaled down
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = max_width / float(width_in)
scale_h = max_height / float(height_in)
if height_in * scale_w <= max_height:
scale = scale_w
else:
scale = scale_h
if scale >= 1.0 and in_fname == out_fname:
return
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image using resize; if using .thumbnail and the image is
# already smaller than max_width, max_height, then this won't scale up
# at all (maybe could be an option someday...)
img = img.resize((width_sc, height_sc), Image.BICUBIC)
# img.thumbnail((width_sc, height_sc), Image.BICUBIC)
# width_sc, height_sc = img.size # necessary if using thumbnail
# insert centered
thumb = Image.new('RGBA', (max_width, max_height), (255, 255, 255, 255))
pos_insert = ((max_width - width_sc) // 2, (max_height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
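# Worked example of the scaling logic above: a 1000x500 input into a 400x400 box gives
# scale_w = 0.4 and scale_h = 0.8; since 500 * 0.4 = 200 <= 400, scale = 0.4, the image
# is resized to 400x200 and pasted at ((400 - 400) // 2, (400 - 200) // 2) = (0, 100),
# i.e. horizontally flush and vertically centered on the white canvas.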
def replace_py_ipynb(fname):
"""Replace .py extension in filename by .ipynb"""
fname_prefix, extension = os.path.splitext(fname)
allowed_extension = '.py'
if extension != allowed_extension:
raise ValueError(
"Unrecognized file extension, expected %s, got %s"
% (allowed_extension, extension))
new_extension = '.ipynb'
return '{}{}'.format(fname_prefix, new_extension)
def get_md5sum(src_file):
"""Returns md5sum of file"""
with open(src_file, 'rb') as src_data:
src_content = src_data.read()
return hashlib.md5(src_content).hexdigest()
def _replace_md5(fname_new, fname_old=None, method='move'):
assert method in ('move', 'copy')
if fname_old is None:
assert fname_new.endswith('.new')
fname_old = os.path.splitext(fname_new)[0]
if os.path.isfile(fname_old) and (get_md5sum(fname_old) ==
get_md5sum(fname_new)):
if method == 'move':
os.remove(fname_new)
else:
if method == 'move':
move(fname_new, fname_old)
else:
copyfile(fname_new, fname_old)
assert os.path.isfile(fname_old)
class Bunch(dict):
"""Dictionary-like object that exposes its keys as attributes."""
def __init__(self, **kwargs): # noqa: D102
dict.__init__(self, kwargs)
self.__dict__ = self
|
the-stack_106_29437 | from dataclasses import dataclass, field
from typing import Optional
from xsdata.models.datatype import XmlPeriod
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-gMonth-minExclusive-4-NS"
@dataclass
class NistschemaSvIvAtomicGMonthMinExclusive4:
class Meta:
name = "NISTSchema-SV-IV-atomic-gMonth-minExclusive-4"
namespace = "NISTSchema-SV-IV-atomic-gMonth-minExclusive-4-NS"
value: Optional[XmlPeriod] = field(
default=None,
metadata={
"required": True,
"min_exclusive": XmlPeriod("--04"),
}
)
|
the-stack_106_29438 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS LLP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.model.document import Document
from frappe.utils import cstr
from hms_tz.hms_tz.utils import make_healthcare_service_order
class PatientEncounter(Document):
def validate(self):
self.set_title()
if self.drug_prescription:
for drug in self.drug_prescription:
if not drug.quantity or drug.quantity == 0:
drug.quantity = get_quantity(drug)
def on_update(self):
if self.appointment:
frappe.db.set_value('Patient Appointment', self.appointment, 'status', 'Closed')
update_encounter_medical_record(self)
def after_insert(self):
insert_encounter_to_medical_record(self)
def on_submit(self):
update_encounter_medical_record(self)
create_therapy_plan(self)
create_healthcare_service_order(self)
# make_insurance_claim(self)
def on_cancel(self):
if self.appointment:
frappe.db.set_value('Patient Appointment', self.appointment, 'status', 'Open')
delete_medical_record(self)
def set_title(self):
self.title = _('{0} with {1}').format(self.patient_name or self.patient,
self.practitioner_name or self.practitioner)[:100]
def create_therapy_plan(encounter):
if len(encounter.therapies):
doc = frappe.new_doc('Therapy Plan')
doc.patient = encounter.patient
doc.company = encounter.company
doc.start_date = encounter.encounter_date
for entry in encounter.therapies:
doc.append('therapy_plan_details', {
'therapy_type': entry.therapy_type,
'no_of_sessions': entry.no_of_sessions,
"prescribe": entry.prescribe or 0
})
doc.save(ignore_permissions=True)
if doc.get('name'):
encounter.db_set('therapy_plan', doc.name)
frappe.msgprint(_('Therapy Plan {0} created successfully.').format(frappe.bold(doc.name)), alert=True)
def insert_encounter_to_medical_record(doc):
subject = set_subject_field(doc)
medical_record = frappe.new_doc('Patient Medical Record')
medical_record.patient = doc.patient
medical_record.subject = subject
medical_record.status = 'Open'
medical_record.communication_date = doc.encounter_date
medical_record.reference_doctype = 'Patient Encounter'
medical_record.reference_name = doc.name
medical_record.reference_owner = doc.owner
medical_record.save(ignore_permissions=True)
def update_encounter_medical_record(encounter):
medical_record_id = frappe.db.exists('Patient Medical Record', {'reference_name': encounter.name})
if medical_record_id and medical_record_id[0][0]:
subject = set_subject_field(encounter)
frappe.db.set_value('Patient Medical Record', medical_record_id[0][0], 'subject', subject)
else:
insert_encounter_to_medical_record(encounter)
def delete_medical_record(encounter):
frappe.delete_doc_if_exists('Patient Medical Record', 'reference_name', encounter.name)
def set_subject_field(encounter):
subject = frappe.bold(_('Healthcare Practitioner: ')) + encounter.practitioner + '<br>'
if encounter.symptoms:
subject += frappe.bold(_('Symptoms: ')) + '<br>'
for entry in encounter.symptoms:
subject += cstr(entry.complaint) + '<br>'
else:
subject += frappe.bold(_('No Symptoms')) + '<br>'
if encounter.diagnosis:
subject += frappe.bold(_('Diagnosis: ')) + '<br>'
for entry in encounter.diagnosis:
subject += cstr(entry.diagnosis) + '<br>'
else:
subject += frappe.bold(_('No Diagnosis')) + '<br>'
if encounter.drug_prescription:
subject += '<br>' + _('Drug(s) Prescribed.')
if encounter.lab_test_prescription:
subject += '<br>' + _('Test(s) Prescribed.')
if encounter.procedure_prescription:
subject += '<br>' + _('Procedure(s) Prescribed.')
if encounter.therapies:
subject += '<br>' + _('Therapy Prescribed.')
if encounter.radiology_procedure_prescription:
subject += '<br>' + _('Radiology Procedure(s) Prescribed.')
return subject
def create_healthcare_service_order(encounter):
if encounter.drug_prescription:
for drug in encounter.drug_prescription:
medication = frappe.get_doc('Medication', drug.drug_code)
args={
'healthcare_service_order_category': medication.get_value('healthcare_service_order_category'),
'patient_care_type': medication.get_value('patient_care_type'),
'order_date': encounter.get_value('encounter_date'),
'ordered_by': encounter.get_value('practitioner'),
'order_group': encounter.name,
'replaces': drug.get_value('replaces'),
'patient': encounter.get_value('patient'),
'order_doctype': 'Medication',
'order': medication.name,
'order_description': medication.get_value('description'),
'intent': drug.get_value('intent'),
'priority': drug.get_value('priority'),
'quantity': drug.get_value("quantity"),
'sequence': drug.get_value('sequence'),
'expected_date': drug.get_value('expected_date'),
'as_needed': drug.get_value('as_needed'),
'occurrence': drug.get_value('occurrence'),
'staff_role': medication.get_value('staff_role'),
'note': drug.get_value('note'),
'patient_instruction': drug.get_value('patient_instruction'),
'company':encounter.company,
'insurance_subscription' : encounter.insurance_subscription if encounter.insurance_subscription else '',
'order_reference_doctype' : "Drug Prescription",
'order_reference_name' : drug.name
}
make_healthcare_service_order(args)
if encounter.lab_test_prescription:
for labtest in encounter.lab_test_prescription:
lab_template = frappe.get_doc('Lab Test Template', labtest.lab_test_code)
args={
'healthcare_service_order_category': lab_template.get_value('healthcare_service_order_category'),
'patient_care_type': lab_template.get_value('patient_care_type'),
'order_date': encounter.get_value('encounter_date'),
'ordered_by': encounter.get_value('practitioner'),
'order_group': encounter.name,
'replaces': labtest.get_value('replaces'),
'patient': encounter.get_value('patient'),
'order_doctype': 'Lab Test Template',
'order': lab_template.name,
'order_description': lab_template.get_value('lab_test_description'),
'quantity' : 1,
'intent': labtest.get_value('intent'),
'priority': labtest.get_value('priority'),
'sequence': labtest.get_value('sequence'),
'as_needed': labtest.get_value('as_needed'),
'staff_role': lab_template.get_value('staff_role'),
'note': labtest.get_value('note'),
'patient_instruction': labtest.get_value('patient_instruction'),
'healthcare_service_unit_type':lab_template.get_value('healthcare_service_unit_type'),
'company':encounter.company,
'source':encounter.source,
'referring_practitioner':encounter.referring_practitioner,
'insurance_subscription' : encounter.insurance_subscription if encounter.insurance_subscription else '',
'order_reference_doctype' : "Lab Prescription",
'order_reference_name' : labtest.name
}
make_healthcare_service_order(args)
if encounter.procedure_prescription:
for procedure in encounter.procedure_prescription:
procedure_template = frappe.get_doc('Clinical Procedure Template', procedure.procedure)
args={
'healthcare_service_order_category': procedure_template.get_value('healthcare_service_order_category'),
'patient_care_type': procedure_template.get_value('patient_care_type'),
'order_date': encounter.get_value('encounter_date'),
'ordered_by': encounter.get_value('practitioner'),
'order_group': encounter.name,
'replaces': procedure.get_value('replaces'),
'patient': encounter.get_value('patient'),
'order_doctype': 'Clinical Procedure Template',
'order': procedure_template.name,
'order_description': procedure_template.get_value('description'),
'quantity' : 1,
'intent': procedure.get_value('intent'),
'priority': procedure.get_value('priority'),
'sequence': procedure.get_value('sequence'),
'as_needed': procedure.get_value('as_needed'),
'body_part': procedure.get_value('body_part'),
'staff_role': procedure_template.get_value('staff_role'),
'note': procedure.get_value('note'),
'patient_instruction': procedure.get_value('patient_instruction'),
'healthcare_service_unit_type':procedure_template.get_value('healthcare_service_unit_type'),
'company':encounter.company,
'source':encounter.source,
'referring_practitioner':encounter.referring_practitioner,
'insurance_subscription' : encounter.insurance_subscription if encounter.insurance_subscription else '',
'order_reference_doctype' : "Procedure Prescription",
'order_reference_name' : procedure.name
}
make_healthcare_service_order(args)
if encounter.therapies:
for therapy in encounter.therapies:
therapy_type = frappe.get_doc('Therapy Type', therapy.therapy_type)
args={
'healthcare_service_order_category': therapy_type.get_value('healthcare_service_order_category'),
'patient_care_type': therapy_type.get_value('patient_care_type'),
'order_date': encounter.get_value('encounter_date'),
'ordered_by': encounter.get_value('practitioner'),
'order_group': encounter.name,
'replaces': therapy.get_value('replaces'),
'patient': encounter.get_value('patient'),
'order_doctype': 'Therapy Type',
'order': therapy_type.name,
'order_description': therapy_type.get_value('description'),
'quantity' : 1,
'intent': therapy.get_value('intent'),
'priority': therapy.get_value('priority'),
'sequence': therapy.get_value('sequence'),
'as_needed': therapy.get_value('as_needed'),
'staff_role': therapy_type.get_value('staff_role'),
'note': therapy.get_value('note'),
'patient_instruction': therapy.get_value('patient_instruction'),
'company':encounter.company,
'source':encounter.source,
'referring_practitioner':encounter.referring_practitioner,
'insurance_subscription' : encounter.insurance_subscription if encounter.insurance_subscription else '',
'order_reference_doctype' : "Therapy Plan Detail",
'order_reference_name' : therapy.name
# 'healthcare_service_unit_type':therapy_type.get_value('healthcare_service_unit_type')
}
make_healthcare_service_order(args)
if encounter.radiology_procedure_prescription:
for radiology in encounter.radiology_procedure_prescription:
radiology_template = frappe.get_doc('Radiology Examination Template', radiology.radiology_examination_template)
args={
'healthcare_service_order_category': radiology_template.get_value('healthcare_service_order_category'),
'patient_care_type': radiology_template.get_value('patient_care_type'),
'order_date': encounter.get_value('encounter_date'),
'ordered_by': encounter.get_value('practitioner'),
'order_group': encounter.name,
'replaces': radiology.get_value('replaces'),
'patient': encounter.get_value('patient'),
'order_doctype': 'Radiology Examination Template',
'order': radiology_template.name,
'order_description': radiology_template.get_value('description'),
'quantity' : 1,
'intent': radiology.get_value('intent'),
'priority': radiology.get_value('priority'),
'sequence': radiology.get_value('sequence'),
'as_needed': radiology.get_value('as_needed'),
'staff_role': radiology_template.get_value('staff_role'),
'note': radiology.get_value('note'),
'patient_instruction': radiology.get_value('patient_instruction'),
'healthcare_service_unit_type':radiology_template.get_value('healthcare_service_unit_type'),
'company':encounter.company,
'source':encounter.source,
'referring_practitioner':encounter.referring_practitioner,
'insurance_subscription' : encounter.insurance_subscription if encounter.insurance_subscription else '',
'order_reference_doctype' : "Radiology Procedure Prescription",
'order_reference_name' : radiology.name
}
make_healthcare_service_order(args)
@frappe.whitelist()
def create_patient_referral(args):
patient_referral = frappe.new_doc('Patient Referral')
args = json.loads(args)
for key in args:
patient_referral.set(key, args[key] if args[key] else '')
patient_referral.save(ignore_permissions=True)
def make_insurance_claim(doc):
return
if doc.insurance_subscription:
from hms_tz.hms_tz.utils import create_insurance_claim, get_service_item_and_practitioner_charge
billing_item, rate = get_service_item_and_practitioner_charge(doc)
insurance_claim, claim_status = create_insurance_claim(doc, 'Appointment Type', doc.appointment_type, 1, billing_item)
if insurance_claim:
frappe.set_value(doc.doctype, doc.name ,'insurance_claim', insurance_claim)
frappe.set_value(doc.doctype, doc.name ,'claim_status', claim_status)
doc.reload()
def get_quantity(self):
quantity = 0
dosage = None
period = None
if self.dosage:
dosage = frappe.get_doc('Prescription Dosage', self.dosage)
for item in dosage.dosage_strength:
quantity += item.strength
if self.period and self.interval:
period = frappe.get_doc('Prescription Duration', self.period)
if self.interval < period.get_days():
quantity = quantity * (period.get_days()/self.interval)
elif self.interval and self.interval_uom and self.period:
period = frappe.get_doc('Prescription Duration', self.period)
interval_in = self.interval_uom
if interval_in == 'Day' and self.interval < period.get_days():
quantity = period.get_days()/self.interval
elif interval_in == 'Hour' and self.interval < period.get_hours():
quantity = period.get_hours()/self.interval
if quantity > 0:
return quantity
else:
return 1
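# Worked examples of the quantity arithmetic above (assuming Prescription Duration's
# get_days()/get_hours() return the prescribed period length):
#
#   - dosage strengths summing to 3, period of 5 days, interval 1
#       -> 3 * (5 / 1) = 15
#   - no dosage, interval 8 with interval_uom 'Hour', period of 3 days (72 hours)
#       -> 72 / 8 = 9
#   - anything that nets 0 falls back to a quantity of 1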
|
the-stack_106_29439 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.devtools.clouderrorreporting.v1beta1 ReportErrorsService API."""
import pkg_resources
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.path_template
from google.cloud.errorreporting_v1beta1.gapic import enums
from google.cloud.errorreporting_v1beta1.gapic import report_errors_service_client_config
from google.cloud.errorreporting_v1beta1.proto import common_pb2
from google.cloud.errorreporting_v1beta1.proto import error_group_service_pb2
from google.cloud.errorreporting_v1beta1.proto import error_stats_service_pb2
from google.cloud.errorreporting_v1beta1.proto import report_errors_service_pb2
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-error-reporting', ).version
class ReportErrorsServiceClient(object):
"""An API for reporting error events."""
SERVICE_ADDRESS = 'clouderrorreporting.googleapis.com:443'
"""The default address of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_DEFAULT_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
# The name of the interface for this client. This is the key used to find
# method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.devtools.clouderrorreporting.v1beta1.ReportErrorsService'
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
'projects/{project}',
project=project,
)
def __init__(self,
channel=None,
credentials=None,
client_config=report_errors_service_client_config.config,
client_info=None):
"""Constructor.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_config (dict): A dictionary of call options for each
method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments to {} are mutually '
'exclusive.'.format(self.__class__.__name__), )
# Create the channel.
if channel is None:
channel = google.api_core.grpc_helpers.create_channel(
self.SERVICE_ADDRESS,
credentials=credentials,
scopes=self._DEFAULT_SCOPES,
)
# Create the gRPC stubs.
self.report_errors_service_stub = (
report_errors_service_pb2.ReportErrorsServiceStub(channel))
if client_info is None:
client_info = (
google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Write the "inner API call" methods to the class.
# These are wrapped versions of the gRPC stub methods, with retry and
# timeout configuration applied, called by the public methods on
# this class.
self._report_error_event = google.api_core.gapic_v1.method.wrap_method(
self.report_errors_service_stub.ReportErrorEvent,
default_retry=method_configs['ReportErrorEvent'].retry,
default_timeout=method_configs['ReportErrorEvent'].timeout,
client_info=client_info,
)
# Service calls
def report_error_event(self,
project_name,
event,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Report an individual error event.
This endpoint accepts <strong>either</strong> an OAuth token,
<strong>or</strong> an
<a href=\"https://support.google.com/cloud/answer/6158862\">API key</a>
for authentication. To use an API key, append it to the URL as the value of
a ``key`` parameter. For example:
<pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ReportErrorsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>> event = {}
>>>
>>> response = client.report_error_event(project_name, event)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project. Written
as ``projects/`` plus the
`Google Cloud Platform project ID <https://support.google.com/cloud/answer/6158840>`_.
Example: ``projects/my-project-123``.
event (Union[dict, ~google.cloud.errorreporting_v1beta1.types.ReportedErrorEvent]): [Required] The error event to be reported.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.errorreporting_v1beta1.types.ReportedErrorEvent`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.ReportErrorEventResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = report_errors_service_pb2.ReportErrorEventRequest(
project_name=project_name,
event=event,
)
return self._report_error_event(
request, retry=retry, timeout=timeout, metadata=metadata)
|
the-stack_106_29441 |
class Node(object):
    """Node class."""
def __init__(self, data=None, next_data=None):
"""Initialize a new Node."""
self.data = data
self.next = next_data
class Stack(object):
    """Stack data structure."""
def __init__(self, iterable=None):
"""Initialize a new instance of Stack."""
self.top_stack = None
self.count = 0
if isinstance(iterable, (str, tuple, list)):
for item in iterable:
self.push(item)
def push(self, new_stack):
"""Push value into the stack."""
current = self.top_stack
self.top_stack = Node(new_stack, current)
self.count += 1
def pop(self):
"""Pop out the top stack value."""
if self.top_stack:
result = self.top_stack.data
if self.top_stack.next:
self.top_stack = self.top_stack.next
else:
self.top_stack = None
self.count -= 1
return result
else:
raise ValueError('No value to pop')
def __len__(self):
"""Return the size of the stack."""
return self.count
def paren(paren_combo):
"""Check for valid paren combos."""
stack = Stack()
for paren in paren_combo:
stack.push(paren)
count = 0
for paren in range(len(stack)):
current = stack.pop()
if current == ')':
count += 1
elif current == '(':
if count == 0:
return 1
else:
count -= 1
return 0 if count == 0 else -1
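# Example results, traced against the stack logic above:
#
#   paren('()')    -> 0    balanced
#   paren('((()')  -> 1    an unmatched '(' remains (open)
#   paren('()))')  -> -1   an unmatched ')' remains (broken)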
|
the-stack_106_29442 | import math
import traceback
from random import randint
import discord
from discord.ext import commands
import cogs.utils.context as context
class Xp(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member) -> None:
if member.bot:
return
if member.guild.id != self.bot.settings.guild_id:
return
user = await self.bot.settings.user(id=member.id)
if user.is_xp_frozen or user.is_clem:
return
if member.guild.id != self.bot.settings.guild_id:
return
level = user.level
db = self.bot.settings.guild()
roles_to_add = await self.assess_new_roles(level, db)
await self.add_new_roles(member, roles_to_add)
@commands.Cog.listener()
async def on_message(self, message):
if not message.guild:
return
if message.guild.id != self.bot.settings.guild_id:
return
if message.author.bot:
return
if message.channel.id == self.bot.settings.guild().channel_botspam:
return
user = await self.bot.settings.user(id=message.author.id)
db = self.bot.settings.guild()
if user.is_xp_frozen or user.is_clem:
return
xp_to_add = randint(0, 11)
new_xp, level_before = await self.bot.settings.inc_xp(message.author.id, xp_to_add)
new_level = await self.get_level(new_xp)
if new_level > level_before:
await self.bot.settings.inc_level(message.author.id)
roles_to_add = await self.assess_new_roles(new_level, db)
await self.add_new_roles(message, roles_to_add)
async def assess_new_roles(self, new_level, db):
roles_to_add = []
if 15 <= new_level:
roles_to_add.append(db.role_memberplus)
if 30 <= new_level:
roles_to_add.append(db.role_memberpro)
if 50 <= new_level:
roles_to_add.append(db.role_memberedition)
return roles_to_add
async def add_new_roles(self, obj, roles_to_add):
if roles_to_add is not None:
if isinstance(obj, discord.Message):
for role in roles_to_add:
role = obj.guild.get_role(role)
if role not in obj.author.roles:
await obj.author.add_roles(role)
elif isinstance(obj, discord.Member):
for role in roles_to_add:
role = obj.guild.get_role(role)
if role not in obj.roles:
await obj.add_roles(role)
async def get_level(self, current_xp):
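        # Per the loop below, each new level costs an extra
        # 45 * level * (floor(level / 10) + 1) XP, so the early cumulative
        # thresholds run 0, 45, 135, 270, 450, ... and the per-level step
        # grows by another factor every 10 levels.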
level = 0
xp = 0
while xp <= current_xp:
xp = xp + 45 * level * (math.floor(level / 10) + 1)
level += 1
return level
async def info_error(self, ctx: context.Context, error):
if (isinstance(error, commands.MissingRequiredArgument)
or isinstance(error, commands.BadArgument)
or isinstance(error, commands.BadUnionArgument)
or isinstance(error, commands.MissingPermissions)
or isinstance(error, commands.NoPrivateMessage)):
await ctx.send_error(error)
else:
traceback.print_exc()
def setup(bot):
bot.add_cog(Xp(bot))
|
the-stack_106_29443 | import sys, logging, os, random, math, open_color, arcade
#check to make sure we are running the right version of Python
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
#turn on logging, in case we have to leave ourselves debugging messages
logging.basicConfig(format='[%(filename)s:%(lineno)d] %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
MARGIN = 30
SCREEN_TITLE = "Bullet exercise"
NUM_ENEMIES = 5
STARTING_LOCATION = (400,100)
BULLET_DAMAGE = 10
USER_HP = 100
ENEMY_HP = 100
HIT_SCORE = 10
KILL_SCORE = 100
class Bullet(arcade.Sprite):
def __init__(self, position, velocity, damage):
'''
initializes the bullet
Parameters: position: (x,y) tuple
velocity: (dx, dy) tuple
damage: int (or float)
'''
super().__init__("assets/bullet.png", 0.5)
(self.center_x, self.center_y) = position
(self.dx, self.dy) = velocity
self.damage = damage
def update(self):
'''
Moves the bullet
'''
self.center_x += self.dx
self.center_y += self.dy
class eBullet(arcade.Sprite):
def __init__(self, position, velocity, damage):
'''
initializes the bullet
Parameters: position: (x,y) tuple
velocity: (dx, dy) tuple
damage: int (or float)
'''
super().__init__("assets/bullet_enemy.png", 0.5)
(self.center_x, self.center_y) = position
(self.dx, self.dy) = velocity
self.damage = damage
def update(self):
'''
Moves the bullet
'''
self.center_x += self.dx
self.center_y += self.dy
class Player(arcade.Sprite):
def __init__(self):
super().__init__("assets/narwhal.png", 0.5)
self.uhp = USER_HP
(self.center_x, self.center_y) = STARTING_LOCATION
class Enemy(arcade.Sprite):
def __init__(self, position):
'''
initializes a penguin enemy
Parameter: position: (x,y) tuple
'''
super().__init__("assets/penguin.png", 0.5)
self.hp = ENEMY_HP
(self.center_x, self.center_y) = position
class Window(arcade.Window):
def __init__(self, width, height, title):
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
self.set_mouse_visible(True)
arcade.set_background_color(open_color.blue_4)
self.bullet_list = arcade.SpriteList()
self.enemy_list = arcade.SpriteList()
self.player = Player()
self.score = 0
def setup(self):
'''
Set up enemies
'''
for i in range(NUM_ENEMIES):
x = 120 * (i+1) + 40
y = 500
enemy = Enemy((x,y))
self.enemy_list.append(enemy)
def update(self, delta_time):
self.bullet_list.update()
for e in self.enemy_list:
collision = arcade.check_for_collision_with_list(e, self.bullet_list)
for c in collision:
e.hp -= c.damage
self.score += HIT_SCORE
if e.hp <= 0:
e.kill()
self.score += KILL_SCORE
# check for collision
# for every bullet that hits, decrease the hp and then see if it dies
# increase the score
# e.kill() will remove the enemy sprite from the game
def on_draw(self):
arcade.start_render()
arcade.draw_text(str(self.score), 20, SCREEN_HEIGHT - 40, open_color.white, 16)
self.player.draw()
self.bullet_list.draw()
self.enemy_list.draw()
def on_mouse_motion(self, x, y, dx, dy):
'''
The player moves left and right with the mouse
'''
self.player.center_x = x
def on_mouse_press(self, x, y, button, modifiers):
if button == arcade.MOUSE_BUTTON_LEFT:
#fire a bullet
x = self.player.center_x
y = self.player.center_y
bullet = Bullet((x,y),(0,10), BULLET_DAMAGE)
self.bullet_list.append(bullet)
def main():
window = Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main() |
the-stack_106_29444 | """
Geographically weighted regression
"""
import numpy as np
from .gwr.base.gwr import GWR as PySAL_GWR
from .gwr.base.sel_bw import Sel_BW
import json
from crankshaft.analysis_data_provider import AnalysisDataProvider
import plpy
class GWR:
def __init__(self, data_provider=None):
if data_provider:
self.data_provider = data_provider
else:
self.data_provider = AnalysisDataProvider()
def gwr(self, subquery, dep_var, ind_vars,
bw=None, fixed=False, kernel='bisquare',
geom_col='the_geom', id_col='cartodb_id'):
"""
subquery: 'select * from demographics'
dep_var: 'pctbachelor'
ind_vars: ['intercept', 'pctpov', 'pctrural', 'pctblack']
bw: value of bandwidth, if None then select optimal
fixed: False (kNN) or True ('distance')
kernel: 'bisquare' (default), or 'exponential', 'gaussian'
"""
params = {'geom_col': geom_col,
'id_col': id_col,
'subquery': subquery,
'dep_var': dep_var,
'ind_vars': ind_vars}
# get data from data provider
query_result = self.data_provider.get_gwr(params)
# exit if data to analyze is empty
if len(query_result) == 0:
plpy.error('No data passed to analysis or independent variables '
'are all null-valued')
# unique ids and variable names list
rowid = np.array(query_result[0]['rowid'], dtype=np.int)
# x, y are centroids of input geometries
x = np.array(query_result[0]['x'], dtype=np.float)
y = np.array(query_result[0]['y'], dtype=np.float)
coords = list(zip(x, y))
# extract dependent variable
Y = np.array(query_result[0]['dep_var'], dtype=np.float).reshape((-1, 1))
n = Y.shape[0]
k = len(ind_vars)
X = np.zeros((n, k))
# extract query result
for attr in range(0, k):
attr_name = 'attr' + str(attr + 1)
X[:, attr] = np.array(
query_result[0][attr_name], dtype=np.float).flatten()
# add intercept variable name
ind_vars.insert(0, 'intercept')
# calculate bandwidth if none is supplied
if bw is None:
bw = Sel_BW(coords, Y, X,
fixed=fixed, kernel=kernel).search()
model = PySAL_GWR(coords, Y, X, bw,
fixed=fixed, kernel=kernel).fit()
# containers for outputs
coeffs = []
stand_errs = []
t_vals = []
filtered_t_vals = []
# extracted model information
c_alpha = model.adj_alpha
filtered_t = model.filter_tvals(c_alpha[1])
predicted = model.predy.flatten()
residuals = model.resid_response
r_squared = model.localR2.flatten()
bw = np.repeat(float(bw), n)
# create lists of json objs for model outputs
for idx in range(n):
coeffs.append(json.dumps({var: model.params[idx, k]
for k, var in enumerate(ind_vars)}))
stand_errs.append(json.dumps({var: model.bse[idx, k]
for k, var in enumerate(ind_vars)}))
t_vals.append(json.dumps({var: model.tvalues[idx, k]
for k, var in enumerate(ind_vars)}))
filtered_t_vals.append(
json.dumps({var: filtered_t[idx, k]
for k, var in enumerate(ind_vars)}))
return list(zip(coeffs, stand_errs, t_vals, filtered_t_vals,
predicted, residuals, r_squared, bw, rowid))
def gwr_predict(self, subquery, dep_var, ind_vars,
bw=None, fixed=False, kernel='bisquare',
geom_col='the_geom', id_col='cartodb_id'):
"""
subquery: 'select * from demographics'
dep_var: 'pctbachelor'
ind_vars: ['intercept', 'pctpov', 'pctrural', 'pctblack']
bw: value of bandwidth, if None then select optimal
fixed: False (kNN) or True ('distance')
kernel: 'bisquare' (default), or 'exponential', 'gaussian'
"""
params = {'geom_col': geom_col,
'id_col': id_col,
'subquery': subquery,
'dep_var': dep_var,
'ind_vars': ind_vars}
# get data from data provider
query_result = self.data_provider.get_gwr_predict(params)
# exit if data to analyze is empty
if len(query_result) == 0:
plpy.error('No data passed to analysis or independent variables '
'are all null-valued')
# unique ids and variable names list
rowid = np.array(query_result[0]['rowid'], dtype=np.int)
x = np.array(query_result[0]['x'], dtype=np.float)
y = np.array(query_result[0]['y'], dtype=np.float)
coords = np.array(list(zip(x, y)), dtype=np.float)
# extract dependent variable
Y = np.array(query_result[0]['dep_var']).reshape((-1, 1))
n = Y.shape[0]
k = len(ind_vars)
X = np.empty((n, k), dtype=np.float)
for attr in range(0, k):
attr_name = 'attr' + str(attr + 1)
X[:, attr] = np.array(
query_result[0][attr_name], dtype=np.float).flatten()
# add intercept variable name
ind_vars.insert(0, 'intercept')
# split data into "training" and "test" for predictions
# create index to split based on null y values
train = np.where(Y != np.array(None))[0]
test = np.where(Y == np.array(None))[0]
# report error if there is no data to predict
if len(test) < 1:
plpy.error('No rows flagged for prediction: verify that rows '
'denoting prediction locations have a dependent '
'variable value of `null`')
# split dependent variable (only need training which is non-Null's)
Y_train = Y[train].reshape((-1, 1))
Y_train = Y_train.astype(np.float)
# split coords
coords_train = coords[train]
coords_test = coords[test]
# split explanatory variables
X_train = X[train]
X_test = X[test]
# calculate bandwidth if none is supplied
if bw is None:
bw = Sel_BW(coords_train, Y_train, X_train,
fixed=fixed, kernel=kernel).search()
# estimate model and predict at new locations
model = PySAL_GWR(coords_train, Y_train, X_train,
bw, fixed=fixed,
kernel=kernel).predict(coords_test, X_test)
coeffs = []
stand_errs = []
t_vals = []
r_squared = model.localR2.flatten()
predicted = model.predy.flatten()
m = len(model.predy)
for idx in range(m):
coeffs.append(json.dumps({var: model.params[idx, k]
for k, var in enumerate(ind_vars)}))
stand_errs.append(json.dumps({var: model.bse[idx, k]
for k, var in enumerate(ind_vars)}))
t_vals.append(json.dumps({var: model.tvalues[idx, k]
for k, var in enumerate(ind_vars)}))
return list(zip(coeffs, stand_errs, t_vals,
r_squared, predicted, rowid[test]))
|
the-stack_106_29446 | r"""
Doubly stochastic gradient descent
==================================
In this tutorial we investigate and implement the doubly stochastic gradient descent
paper from `Ryan Sweke et al. (2019) <https://arxiv.org/abs/1910.01155>`__. In this paper,
it is shown that quantum gradient descent, where a finite number of measurement samples
(or *shots*) are used to estimate the gradient, is a form of stochastic gradient descent.
Furthermore, if the optimization involves a linear combination of expectation values
(such as VQE), sampling from the terms in this linear combination can further reduce required
resources, allowing for "doubly stochastic gradient descent".
Note that based on very similar observations, `Jonas Kuebler et al. (2019) <https://arxiv.org/abs/1909.09083>`_
recently proposed an optimizer (which they call the *individual Coupled Adaptive
Number of Shots (iCANS)* optimizer) that adapts the shot number of
measurements during training.
Background
----------
In classical machine learning, `stochastic gradient descent
<https://en.wikipedia.org/wiki/Stochastic_gradient_descent>`_ is a common optimization strategy
where the standard gradient descent parameter update rule,
.. math:: \theta^{(t+1)} = \theta^{(t)} - \eta \nabla \mathcal{L}(\theta^{(t)}),
is modified such that
.. math:: \theta^{(t+1)} = \theta^{(t)} - \eta g^{(t)}(\theta^{(t)})
where :math:`\eta` is the step-size, and :math:`\{g^{(t)}(\theta)\}` is a sequence of random
variables such that
.. math:: \mathbb{E}[g^{(t)}(\theta)] = \nabla\mathcal{L}(\theta).
In general, stochastic gradient descent is preferred over standard gradient
descent for several reasons:
1. Samples of the gradient estimator :math:`g^{(t)}(\theta)` can typically
be computed much more efficiently than :math:`\mathcal{L}(\theta)`,
2. Stochasticity can help to avoid local minima and saddle points,
3. Numerical evidence shows that convergence properties are superior to regular gradient descent.
In variational quantum algorithms, a parametrized quantum circuit :math:`U(\theta)`
is optimized by a classical optimization loop in order to minimize a function of the expectation
values. For example, consider the expectation values
.. math:: \langle A_i \rangle = \langle 0 | U(\theta)^\dagger A_i U(\theta) | 0\rangle
for a set of observables :math:`\{A_i\}`, and loss function
.. math:: \mathcal{L}(\theta, \langle A_1 \rangle, \dots, \langle A_M \rangle).
While the expectation values can be calculated analytically in classical simulations,
on quantum hardware we are limited to *sampling* from the expectation values; as the
number of samples (or shots) increase, we converge on the analytic expectation value, but can
never recover the exact expression. Furthermore, the parameter-shift rule
(`Schuld et al., 2018 <https://arxiv.org/abs/1811.11184>`__) allows for analytic
quantum gradients to be computed from a linear combination of the variational circuits'
expectation values.
Putting these two results together, `Sweke et al. (2019) <https://arxiv.org/abs/1910.01155>`__
show that samples of the expectation value fed into the parameter-shift rule provide
unbiased estimators of the quantum gradient---resulting in a form of stochastic gradient descent
(referred to as QSGD). Moreover, they show that convergence of the stochastic gradient
descent is guaranteed in sufficiently simplified settings, even in the case where the number
of shots is 1!
.. note::
It is worth noting that the smaller the number of shots used, the larger the
variance in the estimated expectation value. As a result, it may take
more optimization steps for convergence than using a larger number of shots,
or an exact value.
At the same time, a reduced number of shots may significantly reduce the
wall time of each optimization step, leading to a reduction in the overall
optimization time.
"""
##############################################################################
# Let's consider a simple example in PennyLane, comparing analytic gradient
# descent (with exact expectation values) to stochastic gradient descent
# using a finite number of shots.
#
# A single-shot stochastic gradient descent
# -----------------------------------------
#
# Consider the Hamiltonian
#
# .. math::
#
# H = \begin{bmatrix}
# 8 & 4 & 0 & -6\\
# 4 & 0 & 4 & 0\\
# 0 & 4 & 8 & 0\\
# -6 & 0 & 0 & 0
# \end{bmatrix}.
#
# We can solve for the ground state energy using
# the variational quantum eigensolver (VQE) algorithm.
#
# Let's use the ``default.qubit`` simulator for both the analytic gradient,
# as well as the estimated gradient using number of shots :math:`N\in\{1, 100\}`.
import pennylane as qml
from pennylane import numpy as np
np.random.seed(3)
from pennylane import expval
from pennylane.init import strong_ent_layers_uniform
from pennylane.templates.layers import StronglyEntanglingLayers
num_layers = 2
num_wires = 2
eta = 0.01
steps = 200
dev_analytic = qml.device("default.qubit", wires=num_wires, analytic=True)
dev_stochastic = qml.device("default.qubit", wires=num_wires, analytic=False)
##############################################################################
# We can use ``qml.Hermitian`` to directly specify that we want to measure
# the expectation value of the matrix :math:`H`:
H = np.array([[8, 4, 0, -6], [4, 0, 4, 0], [0, 4, 8, 0], [-6, 0, 0, 0]])
def circuit(params):
StronglyEntanglingLayers(weights=params, wires=[0, 1])
return expval(qml.Hermitian(H, wires=[0, 1]))
##############################################################################
# Now, we create a QNode for each of the two devices above,
# and optimize them using gradient descent via the parameter-shift rule.
qnode_analytic = qml.QNode(circuit, dev_analytic)
qnode_stochastic = qml.QNode(circuit, dev_stochastic)
init_params = strong_ent_layers_uniform(num_layers, num_wires)
# Optimizing using exact gradient descent
cost_GD = []
params_GD = init_params
opt = qml.GradientDescentOptimizer(eta)
for _ in range(steps):
cost_GD.append(qnode_analytic(params_GD))
params_GD = opt.step(qnode_analytic, params_GD)
# Optimizing using stochastic gradient descent with shots=1
dev_stochastic.shots = 1
cost_SGD1 = []
params_SGD1 = init_params
opt = qml.GradientDescentOptimizer(eta)
for _ in range(steps):
cost_SGD1.append(qnode_stochastic(params_SGD1))
params_SGD1 = opt.step(qnode_stochastic, params_SGD1)
# Optimizing using stochastic gradient descent with shots=100
dev_stochastic.shots = 100
cost_SGD100 = []
params_SGD100 = init_params
opt = qml.GradientDescentOptimizer(eta)
for _ in range(steps):
cost_SGD100.append(qnode_stochastic(params_SGD100))
params_SGD100 = opt.step(qnode_stochastic, params_SGD100)
##############################################################################
# Note that in the latter two cases we are sampling from an unbiased
# estimator of the cost function, not the analytic cost function.
#
# To track optimization convergence, approaches could include:
#
# * Evaluating the cost function with a larger number of samples at specified
#   intervals (a brief sketch of this approach is given below),
#
# * Keeping track of the *moving average* of the low-shot cost evaluations.
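#
# A brief sketch of the first approach (our own illustration, not part of the
# original demo): rerun a short low-shot optimization and, at fixed intervals,
# log the exact cost of the current parameters on the analytic device.
params_check = init_params
opt_check = qml.GradientDescentOptimizer(eta)
checkpoints = []
for step in range(100):
    params_check = opt_check.step(qnode_stochastic, params_check)
    if step % 25 == 0:
        # periodic, higher-quality estimate of the true cost
        checkpoints.append((step, qnode_analytic(params_check)))
print("Periodic exact-cost checkpoints:", checkpoints)
##############################################################################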
#
# We can now plot the cost against optimization step for the three cases above.
from matplotlib import pyplot as plt
plt.style.use("seaborn")
plt.plot(cost_GD[:100], label="Vanilla gradient descent")
plt.plot(cost_SGD100[:100], "--", label="QSGD (100 shots)")
plt.plot(cost_SGD1[:100], ".", label="QSGD (1 shot)")
# analytic ground state
min_energy = min(np.linalg.eigvalsh(H))
plt.hlines(min_energy, 0, 100, linestyles=":", label="Ground-state energy")
plt.ylabel("Cost function value")
plt.xlabel("Optimization steps")
plt.legend()
plt.show()
##############################################################################
# Using the trained parameters from each optimization strategy, we can
# evaluate the analytic quantum device:
print("Vanilla gradient descent min energy = ", qnode_analytic(params_GD))
print("Stochastic gradient descent (shots=100) min energy = ", qnode_analytic(params_SGD100))
print("Stochastic gradient descent (shots=1) min energy = ", qnode_analytic(params_SGD1))
##############################################################################
# Amazingly, we see that even the ``shots=1`` optimization converged
# to a reasonably close approximation of the ground-state energy!
##############################################################################
# Doubly stochastic gradient descent for VQE
# ------------------------------------------
#
# As noted in `Sweke et al. (2019) <https://arxiv.org/abs/1910.01155>`__,
# variational quantum algorithms often include terms consisting of linear combinations
# of expectation values. This is true of the parameter-shift rule (where the
# gradient of each parameter is determined by shifting the parameter by macroscopic
# amounts and taking the difference), as well as VQE, where the Hamiltonian
# is usually decomposed into a sum of Pauli expectation values.
#
# Consider the Hamiltonian from the previous section. As this Hamiltonian is a
# Hermitian observable, we can always express it as a sum of Pauli matrices using
# the relation
#
# .. math::
#
# H = \sum_{i,j=0,1,2,3} a_{i,j} (\sigma_i\otimes \sigma_j),
#
# where
#
# .. math::
#
# a_{i,j} = \frac{1}{4}\text{tr}[(\sigma_i\otimes \sigma_j )H], ~~ \sigma = \{I, X, Y, Z\}.
#
# Applying this, we can see that
#
# .. math::
#
# H = 4 + 2I\otimes X + 4I \otimes Z - X\otimes X + 5 Y\otimes Y + 2Z\otimes X.
#
# To perform "doubly stochastic" gradient descent, we simply apply the stochastic
# gradient descent approach from above, but in addition also uniformly sample
# a subset of the terms for the Hamiltonian expectation at each optimization step.
# This inserts another element of stochasticity into the system---all the while
# convergence continues to be guaranteed!
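#
# As a quick numerical sanity check (our own addition, not in the original
# text), we can recover the coefficients :math:`a_{i,j}` from the trace
# formula above and confirm that the nonzero entries match the expansion:
paulis = [
    np.identity(2),
    np.array([[0, 1], [1, 0]]),
    np.array([[0, -1j], [1j, 0]]),
    np.array([[1, 0], [0, -1]]),
]
coeffs = np.array(
    [[0.25 * np.trace(np.kron(s_i, s_j) @ H) for s_j in paulis] for s_i in paulis]
)
print(np.round(np.real(coeffs), 8))
##############################################################################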
#
# Let's create a QNode that randomly samples a single term from the above
# Hamiltonian as the observable to be measured.
I = np.identity(2)
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]])
terms = np.array(
[2 * np.kron(I, X), 4 * np.kron(I, Z), -np.kron(X, X), 5 * np.kron(Y, Y), 2 * np.kron(Z, X)]
)
@qml.qnode(dev_stochastic)
def circuit(params, n=None):
StronglyEntanglingLayers(weights=params, wires=[0, 1])
idx = np.random.choice(np.arange(5), size=n, replace=False)
A = np.sum(terms[idx], axis=0)
return expval(qml.Hermitian(A, wires=[0, 1]))
def loss(params):
return 4 + (5 / 1) * circuit(params, n=1)
##############################################################################
# Optimizing the circuit using gradient descent via the parameter-shift rule:
dev_stochastic.shots = 100
cost = []
params = init_params
opt = qml.GradientDescentOptimizer(0.005)
for _ in range(250):
cost.append(loss(params))
params = opt.step(loss, params)
##############################################################################
# During doubly stochastic gradient descent, we are sampling from terms of the
# analytic cost function, so it is not entirely instructive to plot the cost
# versus optimization step---partial sums of the terms in the Hamiltonian
# may have minimum energy below the ground state energy of the total Hamiltonian.
# Nevertheless, we can keep track of the cost value moving average during doubly
# stochastic gradient descent as an indicator of convergence.
def moving_average(data, n=3):
ret = np.cumsum(data, dtype=np.float64)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
average = np.vstack([np.arange(25, 200), moving_average(cost, n=50)[:-26]])
plt.plot(cost_GD, label="Vanilla gradient descent")
plt.plot(cost, ".", label="Doubly QSGD")
plt.plot(average[0], average[1], "--", label="Doubly QSGD (moving average)")
plt.hlines(min_energy, 0, 200, linestyles=":", label="Ground state energy")
plt.ylabel("Cost function value")
plt.xlabel("Optimization steps")
plt.xlim(-2, 200)
plt.legend()
plt.show()
##############################################################################
# Finally, verifying that the doubly stochastic gradient descent optimization
# correctly provides the ground state energy when evaluated for a larger
# number of shots:
print("Doubly stochastic gradient descent min energy = ", qnode_analytic(params))
##############################################################################
# While stochastic gradient descent requires more optimization steps to achieve
# convergence, it is worth noting that it requires significantly fewer quantum
# device evaluations, and thus may as a result take less time overall.
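#
# As a rough, illustrative count (our own back-of-the-envelope estimate, not a
# figure from the text): the parameter-shift rule uses two circuit evaluations
# per trainable parameter, and here each evaluation is only a low-shot estimate
# of a single sampled Hamiltonian term rather than an exact expectation value.
num_params = num_layers * num_wires * 3  # StronglyEntanglingLayers: 3 angles per wire per layer
print("Circuit evaluations per gradient step:", 2 * num_params)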
##############################################################################
# Adaptive stochasticity
# ----------------------
#
# To improve on the convergence, we may even consider a crude "adaptive" modification
# of the doubly stochastic gradient descent optimization performed above. In this
# approach, we successively increase the number of terms we are sampling from as
# the optimization proceeds, as well as increasing the number of shots.
cost = []
params = init_params
opt = qml.GradientDescentOptimizer(0.005)
for i in range(250):
n = min(i // 25 + 1, 5)
dev_stochastic.shots = int(1 + (n - 1) ** 2)
def loss(params):
return 4 + (5 / n) * circuit(params, n=n)
cost.append(loss(params))
params = opt.step(loss, params)
average = np.vstack([np.arange(25, 200), moving_average(cost, n=50)[:-26]])
plt.plot(cost_GD, label="Vanilla gradient descent")
plt.plot(cost, ".", label="Adaptive QSGD")
plt.plot(average[0], average[1], "--", label="Adaptive QSGD (moving average)")
plt.hlines(min_energy, 0, 250, linestyles=":", label="Ground state energy")
plt.ylabel("Cost function value")
plt.xlabel("Optimization steps")
plt.xlim(-2, 200)
plt.legend()
plt.show()
print("Adaptive QSGD min energy = ", qnode_analytic(params))
##############################################################################
# References
# ----------
#
# 1. Ryan Sweke, Frederik Wilde, Johannes Jakob Meyer, Maria Schuld, Paul K. Fährmann,
# Barthélémy Meynard-Piganeau, Jens Eisert. "Stochastic gradient descent for
# hybrid quantum-classical optimization." `arXiv:1910.01155
# <https://arxiv.org/abs/1910.01155>`__, 2019.
|
the-stack_106_29449 | # Copyright (c) 2017 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import subprocess
import yaml
from tests.st.test_base import TestBase, HOST_IPV4
from tests.st.utils.docker_host import DockerHost, CLUSTER_STORE_DOCKER_OPTIONS
from tests.st.utils.utils import get_ip, log_and_run, retry_until_success, \
ETCD_CA, ETCD_CERT, ETCD_KEY, ETCD_HOSTNAME_SSL, ETCD_SCHEME, \
handle_failure, clear_on_failures, add_on_failure, wipe_etcd
_log = logging.getLogger(__name__)
_log.setLevel(logging.DEBUG)
POST_DOCKER_COMMANDS = [
"docker load -q -i /code/calico-node.tar",
"docker load -q -i /code/workload.tar",
]
NAMESPACE_PREFIX = "pcns"
class TestNamespace(TestBase):
"""
    Tests that global network policy and namespaced network policy are correctly
implemented on namespaced workload endpoints.
"""
hosts = None
@classmethod
def setUpClass(cls):
# Wipe etcd once before any test in this class runs.
_log.debug("Wiping etcd")
wipe_etcd(HOST_IPV4)
# We set up 2 hosts on top of which running nine workloads in three namespaces.
# Host1 has 5 workloads.
# 2 in namespace nsa: [nsa_h1_wl0] [nsa_h1_wl1]
# 1 in namespace nsb: [nsb_h1_wl0]
# 2 in default namespace: [default_h1_wl0] [omit_h1_wl0]
# *omit* means 'namespace' field is not specified during workload setup.
#
# Host2 has 4 workloads.
# 1 in namespace nsa: [nsa_h2_wl0]
# 2 in namespace nsb: [nsb_h2_wl0] [nsb_h2_wl1]
# 1 in namespace default: [default_h2_wl0]
#
# Global network policies and network policies then apply on namespaced
# workload endpoints with mixed orders. The test checks connectivity of
# 4 workloads [nsa_h1_wl0, nsb_h2_wl0, default_h1_wl0, omit_h1_wl0] from
# other workloads.
# Create two hosts.
cls.hosts = []
cls.host1 = DockerHost("cali-host1",
additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
post_docker_commands=POST_DOCKER_COMMANDS,
start_calico=False)
cls.host1_hostname = cls.host1.execute("hostname")
cls.host2 = DockerHost("cali-host2",
additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
post_docker_commands=POST_DOCKER_COMMANDS,
start_calico=False)
cls.host2_hostname = cls.host2.execute("hostname")
cls.hosts.append(cls.host1)
cls.hosts.append(cls.host2)
# Start calico node on hosts.
for host in cls.hosts:
host.start_calico_node(env_options=" -e FELIX_HEALTHENABLED=true ")
handle_failure(lambda: retry_until_success(cls.host1.assert_is_ready, retries=20))
handle_failure(lambda: retry_until_success(cls.host2.assert_is_ready, retries=20))
# Prepare namespace profile so that we can use namespaceSelector for non-k8s deployment.
# CNI will use the existing profile which is setup here instead of creating its own.
cls.add_ns_profile('nsa')
cls.add_ns_profile('nsb')
cls.add_ns_profile('default')
# Create calico network.
cls.calinet = cls.host1.create_network("calinet")
# Create workloads for host1
# For CNI, network is used for cni_name but nothing else.
# We set network to same value as namespace name to let cni program a
# namespace profile for us.
cls.nsa_wl = cls.host1.create_workload(
"nsa_h1_wl0",
image="workload",
network="nsa",
labels=["wep=nsa_h1_wl0"],
namespace="nsa")
cls.host1.create_workload(
"nsa_h1_wl1",
image="workload",
network="nsa",
labels=["wep=nsa_h1_wl1"],
namespace="nsa")
cls.host1.create_workload(
"nsb_h1_wl0",
image="workload",
network="nsb",
labels=["wep=nsb_h1_wl0"],
namespace="nsb")
cls.default_wl = cls.host1.create_workload(
"default_h1_wl0",
image="workload",
network="default",
labels=["wep=default_h1_wl0"],
namespace="default")
cls.omit_wl = cls.host1.create_workload(
"omit_h1_wl0",
image="workload",
network="default",
labels=["wep=omit_h1_wl0"],
namespace=None)
# Create workloads for host2
cls.nsb_wl = cls.host2.create_workload(
"nsb_h2_wl0",
image="workload",
network="nsb",
labels=["wep=nsb_h2_wl0"],
namespace="nsb")
cls.host2.create_workload(
"nsb_h2_wl1",
image="workload",
network="nsb",
labels=["wep=nsb_h2_wl1"],
namespace="nsb")
cls.host2.create_workload(
"nsa_h2_wl0",
image="workload",
network="nsa",
labels=["wep=nsa_h2_wl0"],
namespace="nsa")
cls.host2.create_workload(
"default_h2_wl0",
image="workload",
network="default",
labels=["wep=default_h2_wl0"],
namespace="default")
# Work out workload set for different namespaces.
cls.all_workloads = cls.host1.workloads.union(cls.host2.workloads)
cls.wl_nsa = filter(lambda x: x.namespace == "nsa", cls.all_workloads)
cls.wl_nsb = filter(lambda x: x.namespace == "nsb", cls.all_workloads)
cls.wl_default = filter(lambda x: x.namespace == "default" or x.namespace is None, cls.all_workloads)
clear_on_failures()
add_on_failure(cls.host1.log_extra_diags)
add_on_failure(cls.host2.log_extra_diags)
@handle_failure
def test_can_access_without_policy(self):
"""
        Test that all workloads can be accessed without policy.
"""
self.check_namespace_access(self.nsa_wl, True, True, True)
self.check_namespace_access(self.nsb_wl, True, True, True)
self.check_namespace_access(self.default_wl, True, True, True)
self.check_namespace_access(self.omit_wl, True, True, True)
@handle_failure
def test_global_policy(self):
"""
Test global network policy with different order.
"""
self.add_global_ingress(500, 'Deny', 'default')
self.add_global_ingress(200, 'Allow', 'nsa')
self.add_global_ingress(100, 'Deny', 'nsb')
self.check_namespace_access(self.nsa_wl, True, False, False)
self.check_namespace_access(self.nsb_wl, True, False, False)
self.check_namespace_access(self.default_wl, True, False, False)
self.check_namespace_access(self.omit_wl, True, False, False)
@handle_failure
def test_deny_nsa(self):
"""
Test network policy for namespace nsa.
"""
self.add_global_ingress(200, 'Allow')
self.add_namespace_ingress('nsa', 100, 'Deny', 'nsb')
self.check_namespace_access(self.nsa_wl, True, False, True)
self.check_namespace_access(self.nsb_wl, True, True, True)
self.check_namespace_access(self.default_wl, True, True, True)
self.check_namespace_access(self.omit_wl, True, True, True)
@handle_failure
def test_deny_nsa_with_two_policy(self):
"""
Test deny network policy for namespace nsa with two orders mixed with global network policy.
"""
self.add_global_ingress(200, 'Allow')
self.add_namespace_ingress('nsa', 300, 'Deny', 'nsb')
self.add_namespace_ingress('nsa', 100, 'Deny', 'default')
self.check_namespace_access(self.nsa_wl, True, True, False)
self.check_namespace_access(self.nsb_wl, True, True, True)
self.check_namespace_access(self.default_wl, True, True, True)
self.check_namespace_access(self.omit_wl, True, True, True)
@handle_failure
def test_deny_default_with_two_policy(self):
"""
Test deny network policy for namespace default with two orders mixed with global network policy.
"""
self.add_global_ingress(200, 'Allow')
self.add_namespace_ingress('default', 300, 'Deny', 'nsb')
self.add_namespace_ingress('default', 100, 'Deny', 'nsa')
self.check_namespace_access(self.nsa_wl, True, True, True)
self.check_namespace_access(self.nsb_wl, True, True, True)
self.check_namespace_access(self.default_wl, False, True, True)
self.check_namespace_access(self.omit_wl, False, True, True)
@handle_failure
def test_allow_nsb_with_two_policy(self):
"""
        Test allow network policy for namespace nsb with two orders mixed with global network policy.
"""
self.add_global_ingress(200, 'Deny')
self.add_namespace_ingress('nsb', 300, 'Allow', 'nsa')
self.add_namespace_ingress('nsb', 100, 'Allow', 'default')
self.check_namespace_access(self.nsa_wl, False, False, False)
self.check_namespace_access(self.nsb_wl, False, False, True)
self.check_namespace_access(self.default_wl, False, False, False)
self.check_namespace_access(self.omit_wl, False, False, False)
@handle_failure
def test_allow_default_with_two_policy(self):
"""
        Test allow network policy for namespace default with two orders mixed with global network policy.
"""
self.add_global_ingress(200, 'Deny')
self.add_namespace_ingress('default', 300, 'Allow', 'nsb')
self.add_namespace_ingress('default', 100, 'Allow', 'nsa')
self.check_namespace_access(self.nsa_wl, False, False, False)
self.check_namespace_access(self.nsb_wl, False, False, False)
self.check_namespace_access(self.default_wl, True, False, False)
self.check_namespace_access(self.omit_wl, True, False, False)
@handle_failure
def test_mixed_deny(self):
"""
Test mixed deny network policy for namespaces mixed with global network policy.
"""
self.add_global_ingress(200, 'Allow')
self.add_namespace_ingress('nsa', 300, 'Deny', 'default')
self.add_namespace_ingress('nsa', 100, 'Deny', 'nsb')
self.add_namespace_ingress('nsb', 300, 'Deny', 'default')
self.add_namespace_ingress('nsb', 100, 'Deny', 'nsa')
self.add_namespace_ingress('default', 300, 'Deny', 'nsa')
self.add_namespace_ingress('default', 100, 'Deny', 'default')
self.check_namespace_access(self.nsa_wl, True, False, True)
self.check_namespace_access(self.nsb_wl, False, True, True)
self.check_namespace_access(self.default_wl, True, True, False)
self.check_namespace_access(self.omit_wl, True, True, False)
def setUp(self):
# Override the per-test setUp to avoid wiping etcd; instead only clean up the data we
# added.
self.remove_policy()
def tearDown(self):
self.remove_policy()
super(TestNamespace, self).tearDown()
@classmethod
def tearDownClass(cls):
cls.delete_all("profile")
# Tidy up
for host in cls.hosts:
host.remove_workloads()
for host in cls.hosts:
host.cleanup()
del host
cls.calinet.delete()
clear_on_failures()
def add_namespace_ingress(self, ns, order, action, from_ns):
ns_selector = "ns_profile == '%s'" % from_ns
self.add_policy({
'apiVersion': 'projectcalico.org/v3',
'kind': 'NetworkPolicy',
'metadata': {
'name': '%s-%s-%s-from-%s' % (ns, order, action.lower(), from_ns),
'namespace': ns
},
'spec': {
'order': order,
'ingress': [
{
'protocol': 'TCP',
'source': {
'namespaceSelector': ns_selector,
},
'action': action.capitalize(),
},
],
'egress': [],
}
})
def add_global_ingress(self, order, action, from_ns='all'):
if from_ns != 'all':
ingress_map = {
'source': {
'selector': "%s.ns_profile == '%s'" % (NAMESPACE_PREFIX, from_ns)
},
'action': action.capitalize(),
}
else:
ingress_map = {
'action': action.capitalize(),
}
self.add_policy({
'apiVersion': 'projectcalico.org/v3',
'kind': 'GlobalNetworkPolicy',
'metadata': {
'name': 'global-%s-%s-from-%s' % (order, action.lower(), from_ns),
},
'spec': {
'order': order,
'ingress': [
ingress_map,
],
'egress': [],
}
})
@classmethod
def add_ns_profile(cls, ns):
profile_data = {
'apiVersion': 'projectcalico.org/v3',
'kind': 'Profile',
'metadata': {
'name': ns,
},
'spec': {
'labelsToApply': {
'%s.ns_profile' % NAMESPACE_PREFIX: ns
},
'ingress': [
{
'action': 'Allow',
},
],
'egress': [
{
'action': 'Allow',
},
],
}
}
cls._apply_resources(profile_data, cls.host1)
def add_policy(self, policy_data):
self._apply_resources(policy_data, self.host1)
def check_namespace_access(self, target, nsa_can, nsb_can, default_can):
assert_func = {
True: self.assert_workload_can_access_workload,
False: self.assert_workload_can_not_access_workload
}
for src in self.wl_nsa:
if not src == target:
assert_func[nsa_can](src, target)
for src in self.wl_nsb:
if not src == target:
assert_func[nsb_can](src, target)
for src in self.wl_default:
if not src == target:
assert_func[default_can](src, target)
def assert_workload_can_access_workload(self, src_workload, target_workload):
_log.info("Can access test from %s to %s", src_workload.name, target_workload.name)
if src_workload.check_can_tcp(target_workload.ip, 1):
return
_log.exception("workload %s with IP:%s failed to access workload %s on IP:%s",
src_workload.name, src_workload.ip, target_workload.name, target_workload.ip)
msg = ("workload %s with IP:%s failed to access workload %s on IP:%s" %
(src_workload.name, src_workload.ip, target_workload.name, target_workload.ip))
self.fail(msg)
def assert_workload_can_not_access_workload(self, src_workload, target_workload):
_log.info("Cannot access test from %s to %s", src_workload.name, target_workload.name)
if src_workload.check_cant_tcp(target_workload.ip, 1):
return
_log.exception("workload %s with IP:%s can access workload %s on IP:%s",
src_workload.name, src_workload.ip, target_workload.name, target_workload.ip)
msg = ("workload %s with IP:%s can access workload %s on IP:%s" %
(src_workload.name, src_workload.ip, target_workload.name, target_workload.ip))
self.fail(msg)
def remove_policy(self):
self.delete_all("globalnetworkpolicy")
self.delete_all("networkpolicy --all-namespaces")
@classmethod
def delete_all(cls, resource):
# Grab all objects of a resource type
objects = yaml.load(cls.hosts[0].calicoctl("get %s -o yaml" % resource))
# and delete them (if there are any)
if len(objects) > 0:
_log.info("objects: %s", objects)
if 'items' in objects and len(objects['items']) == 0:
pass
else:
cls._delete_data(objects, cls.hosts[0])
@classmethod
def _delete_data(cls, data, host):
_log.debug("Deleting data with calicoctl: %s", data)
cls._exec_calicoctl("delete", data, host)
@classmethod
def _apply_resources(cls, resources, host):
cls._exec_calicoctl("apply", resources, host)
@staticmethod
def _exec_calicoctl(action, data, host):
# Delete creationTimestamp fields from the data that we're going to
# write.
for obj in data.get('items', []):
if 'creationTimestamp' in obj['metadata']:
del obj['metadata']['creationTimestamp']
if 'metadata' in data and 'creationTimestamp' in data['metadata']:
del data['metadata']['creationTimestamp']
# Use calicoctl with the modified data.
host.writefile("new_data",
yaml.dump(data, default_flow_style=False))
host.calicoctl("%s -f new_data" % action)
@classmethod
def get_container_ip(cls, container_name):
ip = log_and_run(
"docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' %s" %
container_name)
return ip.strip()
|
the-stack_106_29450 | import numpy as np
def noise_replace(iris, upper_occlusion_theta, lower_occlusion_theta):
'''
Replaces portions of the iris with continuous uniform noise.
Input:
iris - numpy array of pixel intensities of transformed iris.
upper_occlusion_theta - tuple that defines the angular bounds of the upper iris
that is to be replaced with noise.
        lower_occlusion_theta - tuple that defines the angular bounds of the lower iris
that is to be replaced with noise.
Output:
iris - numpy array of pixel intensities of transformed iris with desired
portions replaced with noise.
'''
height = iris.shape[0]
top_lid_min = int(upper_occlusion_theta[0])
top_lid_max = int(upper_occlusion_theta[1])
lower_lid_min = int(lower_occlusion_theta[0])
lower_lid_max = int(lower_occlusion_theta[1])
width_upper = top_lid_max - top_lid_min
width_lower = lower_lid_max - lower_lid_min
    # find the mean iris intensity and use it to scale the uniform replacement noise.
normalized_magnitude = (np.sum(iris)/iris.size)
upper_noise = np.random.random((height,width_upper))*normalized_magnitude
lower_noise = np.random.random((height,width_lower))*normalized_magnitude
iris[:,top_lid_min:top_lid_max] = upper_noise
iris[:,lower_lid_min:lower_lid_max] = lower_noise
return iris
def noise_replace_eyelid(iris):
'''
    Replaces portions of the iris that are covered by masks (zero-valued pixels).
Input:
iris - numpy array of pixel intensities of transformed iris.
Output:
        iris - numpy array of pixel intensities of transformed iris with masked
        portions filled with the mean iris intensity.
'''
    # find the mean iris intensity and use it to fill the masked (zero-valued) pixels.
normalized_magnitude = (np.sum(iris)/iris.size)
loc = (iris == 0)
iris[loc] = normalized_magnitude
return iris
def iris_extension(iris, theta_resolution, lower_theta = 0, upper_theta = 0):
'''
Extends the iris by inserting portions of the iris before zero at the beginning and appending
    portions of the iris after zero to the end.
Inputs:
iris - numpy array of pixel intensities of transformed iris
theta_resolution - double; degree of upsampling used in the transform along the theta axis
lower_theta - int; degrees below zero that define the iris insertion bounds
upper_theta - int; degrees above zero that define the iris extension bounds
Outputs:
iris - numpy array of pixel intensities that represents the extended iris
'''
upper_theta = int(upper_theta/theta_resolution)
lower_theta = int(lower_theta/theta_resolution)
UPPER_BOUND = int(360/theta_resolution)
iris_extension = iris[:,0:upper_theta]
iris_insertion = iris[:,(UPPER_BOUND+lower_theta):UPPER_BOUND]
iris = np.concatenate((iris_insertion,iris,iris_extension),axis=1)
return iris
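if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the array shape, angular bounds,
    # and resolution below are assumptions, not values from the original pipeline.
    fake_iris = np.random.random((64, 360))  # radial x angular samples of an unrolled iris
    occluded = noise_replace(fake_iris.copy(),
                             upper_occlusion_theta=(45, 135),
                             lower_occlusion_theta=(225, 315))
    extended = iris_extension(occluded, theta_resolution=1.0,
                              lower_theta=-30, upper_theta=30)
    print(occluded.shape, extended.shape)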
|
the-stack_106_29451 | # Original Code here:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
from contextlib import contextmanager
import os
import logging
import shutil
import tempfile
from typing import Callable, Dict, Generator, Optional, Type, Any
import torch
from datetime import timedelta
import ray
from ray import tune
from ray.tune.result import RESULT_DUPLICATE
from ray.tune.logger import NoopLogger
from ray.tune.function_runner import wrap_function
from ray.tune.trainable import DistributedTrainable
from ray.tune.utils.placement_groups import PlacementGroupFactory
from ray.tune.utils.trainable import PlacementGroupUtil, TrainableUtil
from ray.tune.utils import detect_checkpoint_function
from ray.util.sgd.torch.utils import setup_process_group, setup_address
from ray.util.placement_group import remove_placement_group
from ray.util.sgd.torch.constants import NCCL_TIMEOUT_S
logger = logging.getLogger(__name__)
_distributed_enabled = False
def is_distributed_trainable():
"""Returns True if executing within a DistributedTrainable."""
return _distributed_enabled
def enable_distributed_trainable():
global _distributed_enabled
_distributed_enabled = True
def logger_creator(log_config: Dict, logdir: str, rank: int) -> NoopLogger:
worker_dir = os.path.join(logdir, "worker_{}".format(rank))
os.makedirs(worker_dir, exist_ok=True)
return NoopLogger(log_config, worker_dir)
class _TorchTrainable(DistributedTrainable):
"""Base class for distributed training on Tune.
A wrapper class is needed to actually create a working
version of this trainable.
"""
_function = None
_num_workers = None
_num_gpus_per_worker = None
_num_cpus_per_worker = None
_num_workers_per_host = None
_placement_group = None
_timeout_s = None
__slots__ = ["workers", "_finished"]
@property
def should_colocate(self) -> bool:
return self._num_workers_per_host is not None
@classmethod
def default_process_group_parameters(cls) -> Dict:
return dict(timeout=timedelta(seconds=NCCL_TIMEOUT_S), backend="gloo")
def setup(self, config: Dict):
self._finished = False
num_workers = self._num_workers
logdir = self.logdir
assert self._function
func_trainable = wrap_function(self.__class__._function)
remote_trainable = ray.remote(func_trainable)
(
remote_option,
self._placement_group,
) = PlacementGroupUtil.get_remote_worker_options(
self._num_workers,
self._num_cpus_per_worker,
self._num_gpus_per_worker,
self._num_workers_per_host,
self._timeout_s,
)
remote_trainable = remote_trainable.options(**remote_option)
new_config = DistributedTrainable.build_config(self, config)
self.workers = [
remote_trainable.remote(
config=new_config,
logger_creator=lambda cfg: logger_creator(cfg, logdir, rank),
)
for rank in range(num_workers)
]
# Address has to be IP of rank 0 worker's node.
address = ray.get(self.workers[0].execute.remote(lambda _: setup_address()))
pgroup_params = self.default_process_group_parameters()
from functools import partial
setup_on_worker = partial(
setup_process_group, url=address, world_size=num_workers, **pgroup_params
)
ray.get(
[
w.execute.remote(lambda _: setup_on_worker(world_rank=rank))
for rank, w in enumerate(self.workers)
]
)
ray.get(
[
w.execute.remote(lambda _: enable_distributed_trainable())
for rank, w in enumerate(self.workers)
]
)
def step(self) -> Dict:
if self._finished:
raise RuntimeError("Training has already finished.")
result = ray.get([w.step.remote() for w in self.workers])[0]
if RESULT_DUPLICATE in result:
self._finished = True
return result
def save_checkpoint(self, checkpoint_dir: str) -> str:
# TODO: optimize if colocated
save_obj = ray.get(self.workers[0].save_to_object.remote())
checkpoint_path = TrainableUtil.create_from_pickle(save_obj, checkpoint_dir)
return checkpoint_path
def load_checkpoint(self, checkpoint_dir: str):
checkpoint_obj = TrainableUtil.checkpoint_to_object(checkpoint_dir)
return ray.get(
[w.restore_from_object.remote(checkpoint_obj) for w in self.workers]
)
def stop(self):
ray.get([worker.stop.remote() for worker in self.workers])
if self.should_colocate:
remove_placement_group(self._placement_group)
def DistributedTrainableCreator(
func: Callable[[Dict, Optional[str]], Any],
num_workers: int = 1,
num_cpus_per_worker: int = 1,
num_gpus_per_worker: int = 0,
num_workers_per_host: Optional[int] = None,
backend: str = "gloo",
timeout_s: int = NCCL_TIMEOUT_S,
use_gpu=None,
) -> Type[_TorchTrainable]:
"""Creates a class that executes distributed training.
Similar to running `torch.distributed.launch`.
Note that you typically should not instantiate the object
created.
Args:
func: This function is a Tune trainable function.
This function must have 2 args in the signature, and the
latter arg must contain `checkpoint_dir`. For example:
`func(config, checkpoint_dir=None)`.
num_workers: Number of training workers to include in
world.
num_cpus_per_worker: Number of CPU resources to reserve
per training worker.
num_gpus_per_worker: Number of GPU resources to reserve
per training worker.
num_workers_per_host: Optional[int]: Number of workers to
colocate per host.
backend: One of "gloo", "nccl".
timeout_s: Seconds before the torch process group
times out. Useful when machines are unreliable. Defaults
to 1800 seconds. This value is also reused for triggering
placement timeouts if forcing colocation.
Returns:
type(Trainable): A trainable class object that can be passed
to Tune. Resources are automatically set within the object, so
users do not need to set `resources_per_trainable`.
Example:
.. code-block:: python
trainable_cls = DistributedTrainableCreator(
train_func, num_workers=2)
analysis = tune.run(trainable_cls)
"""
if use_gpu:
raise DeprecationWarning(
"use_gpu is deprecated. Use 'num_gpus_per_worker' instead."
)
detect_checkpoint_function(func, abort=True)
if num_workers_per_host:
if num_workers % num_workers_per_host:
raise ValueError(
"`num_workers` must be an integer multiple of workers_per_node."
)
class WrappedDistributedTorchTrainable(_TorchTrainable):
_function = func
_num_workers = num_workers
_num_cpus_per_worker = num_cpus_per_worker
_num_gpus_per_worker = num_gpus_per_worker
_num_workers_per_host = num_workers_per_host
_timeout_s = timeout_s
@classmethod
def default_process_group_parameters(self) -> Dict:
return dict(timeout=timedelta(seconds=timeout_s), backend=backend)
@classmethod
def default_resource_request(cls, config: Dict) -> PlacementGroupFactory:
return PlacementGroupFactory(
[{}]
+ [{"CPU": cls._num_cpus_per_worker, "GPU": cls._num_gpus_per_worker}]
* num_workers
)
return WrappedDistributedTorchTrainable
@contextmanager
def distributed_checkpoint_dir(
step: int, disable: bool = False
) -> Generator[str, None, None]:
"""ContextManager for creating a distributed checkpoint.
Only checkpoints a file on the "main" training actor, avoiding
redundant work.
Args:
step: Used to label the checkpoint
disable: Disable for prototyping.
Yields:
str: A path to a directory. This path will be used
again when invoking the training_function.
Example:
.. code-block:: python
def train_func(config, checkpoint_dir):
if checkpoint_dir:
path = os.path.join(checkpoint_dir, "checkpoint")
model_state_dict = torch.load(path)
if epoch % 3 == 0:
with distributed_checkpoint_dir(step=epoch) as checkpoint_dir:
path = os.path.join(checkpoint_dir, "checkpoint")
torch.save(model.state_dict(), path)
"""
if torch.distributed.get_rank() == 0 and not disable:
with tune.checkpoint_dir(step=step) as checkpoint_dir:
yield checkpoint_dir
else:
path = tempfile.mkdtemp()
yield path
shutil.rmtree(path)
def _train_check_global(config: Dict, checkpoint_dir: Optional[str] = None):
"""For testing only. Putting this here because Ray has problems
serializing within the test file."""
assert is_distributed_trainable()
import time
time.sleep(0.1)
tune.report(is_distributed=True)
def _train_simple(config: Dict, checkpoint_dir: Optional[str] = None):
"""For testing only. Putting this here because Ray has problems
serializing within the test file."""
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
import torch.optim as optim
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 8, 5, 5, 5
# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
loss_fn = nn.MSELoss()
# Use the nn package to define our model and loss function.
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
optimizer = optim.SGD(model.parameters(), lr=0.1)
if checkpoint_dir:
with open(os.path.join(checkpoint_dir, "checkpoint")) as f:
model_state, optimizer_state = torch.load(f)
model.load_state_dict(model_state)
optimizer.load_state_dict(optimizer_state)
model = DistributedDataParallel(model)
for epoch in range(config.get("epochs", 10)):
optimizer.zero_grad()
output = model(x)
loss = loss_fn(output, y)
loss.backward()
optimizer.step()
if epoch % 3 == 0:
if config.get("enable_checkpoint", True):
with distributed_checkpoint_dir(step=epoch) as checkpoint_dir:
path = os.path.join(checkpoint_dir, "checkpoint")
torch.save((model.state_dict(), optimizer.state_dict()), path)
tune.report(mean_loss=loss.item())
def _train_validate_session(config: Dict, checkpoint_dir: Optional[str] = None):
"""For testing only. Putting this here because Ray has problems
serializing within the test file."""
current_session = tune.session.get_session()
assert current_session is not None
assert current_session.trial_id != "default"
assert current_session.trial_name != "default"
|
the-stack_106_29452 | from pathlib import Path
import pytest
from pydantic import BaseModel, ValidationError
from beanie.odm.fields import PydanticObjectId
from beanie.odm.utils.encoder import bson_encoder
from tests.odm.models import (
DocumentWithCustomFiledsTypes,
DocumentWithBsonEncodersFiledsTypes,
Sample,
)
class M(BaseModel):
p: PydanticObjectId
def test_pydantic_object_id_wrong_input():
with pytest.raises(ValidationError):
M(p="test")
def test_pydantic_object_id_bytes_input():
p = PydanticObjectId()
m = M(p=str(p).encode("utf-8"))
assert m.p == p
with pytest.raises(ValidationError):
M(p=b"test")
async def test_bson_encoders_filed_types():
custom = DocumentWithBsonEncodersFiledsTypes(
color="7fffd4",
)
c = await custom.insert()
c_fromdb = await DocumentWithBsonEncodersFiledsTypes.get(c.id)
assert c_fromdb.color.as_hex() == c.color.as_hex()
async def test_custom_filed_types():
custom1 = DocumentWithCustomFiledsTypes(
color="#753c38",
decimal=500,
secret_bytes=b"secret_bytes",
secret_string="super_secret_password",
ipv4address="127.0.0.1",
ipv4interface="192.0.2.5/24",
ipv4network="192.0.2.0/24",
ipv6address="::abc:7:def",
ipv6interface="2001:db00::2/24",
ipv6network="2001:db00::0/24",
date="2000-12-24",
time="12:24:12.000333",
timedelta=4782453,
set_type={"one", "two", "three"},
tuple_type=tuple([3, "string"]),
path="/etc/hosts",
)
custom2 = DocumentWithCustomFiledsTypes(
color="magenta",
decimal=500.213,
secret_bytes=b"secret_bytes",
secret_string="super_secret_password",
ipv4address="127.0.0.1",
ipv4interface="192.0.2.5/24",
ipv4network="192.0.2.0/24",
ipv6address="::abc:7:def",
ipv6interface="2001:db00::2/24",
ipv6network="2001:db00::0/24",
date=1627498153,
time="12:35",
timedelta=4782453,
set_type=["one", "two", "three"],
tuple_type=[3, "three"],
path=Path("C:\\Windows"),
)
c1 = await custom1.insert()
c2 = await custom2.insert()
c1_fromdb = await DocumentWithCustomFiledsTypes.get(c1.id)
c2_fromdb = await DocumentWithCustomFiledsTypes.get(c2.id)
assert set(c1_fromdb.set_type) == set(c1.set_type)
assert set(c2_fromdb.set_type) == set(c2.set_type)
c1_fromdb.set_type = c2_fromdb.set_type = c1.set_type = c2.set_type = None
assert bson_encoder.encode(c1_fromdb) == bson_encoder.encode(c1)
assert bson_encoder.encode(c2_fromdb) == bson_encoder.encode(c2)
def test_hidden(document):
assert document.revision_id is None
assert "revision_id" not in document.dict()
def test_expression_fields():
assert Sample.nested.integer == "nested.integer"
assert Sample.nested["integer"] == "nested.integer"
|
the-stack_106_29455 | import time
import dateparser
import pytz
import json
from datetime import datetime
from binance import Client
import binance.constants as bc
def date_to_milliseconds(date_str):
"""Convert UTC date to milliseconds
If using offset strings add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
    See dateparser docs for formats http://dateparser.readthedocs.io/en/latest/
:param date_str: date in readable format, i.e. "January 01, 2018", "11 hours ago UTC", "now UTC"
:type date_str: str
"""
# get epoch value in UTC
epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
# parse our date string
d = dateparser.parse(date_str)
# if the date is not timezone aware apply UTC timezone
if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
d = d.replace(tzinfo=pytz.utc)
# return the difference in time
return int((d - epoch).total_seconds() * 1000.0)
def interval_to_milliseconds(interval):
"""Convert a Binance interval string to milliseconds
:param interval: Binance interval string 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w
:type interval: str
:return:
None if unit not one of m, h, d or w
None if string not in correct format
int value of interval in milliseconds
"""
ms = None
seconds_per_unit = {
"m": 60,
"h": 60 * 60,
"d": 24 * 60 * 60,
"w": 7 * 24 * 60 * 60
}
unit = interval[-1]
if unit in seconds_per_unit:
try:
ms = int(interval[:-1]) * seconds_per_unit[unit] * 1000
except ValueError:
pass
return ms
def historical_klines(symbol, interval, start_str, end_str=None):
"""Get Historical Klines from Binance
    See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
    :param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format
:type start_str: str
:param end_str: optional - end date string in UTC format
:type end_str: str
:return: list of OHLCV values
"""
# create the Binance client, no need for api key
client = Client("", "")
# init our list
output_data = []
# setup the max limit
limit = 500
# convert interval to useful value in seconds
timeframe = interval_to_milliseconds(interval)
# convert our date strings to milliseconds
start_ts = date_to_milliseconds(start_str)
# if an end time was passed convert it
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
idx = 0
# it can be difficult to know when a symbol was listed on Binance so allow start time to be before list date
symbol_existed = False
while True:
# fetch the klines from start_ts up to max 500 entries or the end_ts if set
temp_data = client.klines(
symbol=symbol,
interval=interval,
limit=limit,
startTime=start_ts,
endTime=end_ts
)
# handle the case where our start date is before the symbol pair listed on Binance
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
# append this loops data to our output data
output_data += temp_data
# update our start timestamp using the last value in the array and add the interval timeframe
start_ts = temp_data[len(temp_data) - 1][0] + timeframe
else:
# it wasn't listed yet, increment our start date
start_ts += timeframe
idx += 1
# check if we received less than the required limit and exit the loop
if len(temp_data) < limit:
# exit the while loop
break
# sleep after every 3rd call to be kind to the API
if idx % 3 == 0:
time.sleep(1)
return output_data
symbol = "ETHBTC"
start = "1 Dec, 2017"
end = "1 Jan, 2018"
interval = bc.KLINE_INTERVAL_30MINUTE
klines = historical_klines(symbol, interval, start, end)
# open a file with filename including symbol, interval and start and end converted to milliseconds
with open(
"Binance_{}_{}_{}-{}.json".format(
symbol,
interval,
date_to_milliseconds(start),
date_to_milliseconds(end)
),
'w' # set file write mode
) as f:
f.write(json.dumps(klines))
|
the-stack_106_29456 | import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from lymph.utils.autoencoder import Autoencoder
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index: (start_index + batch_size)]
def train_stacked_autoencoders(in_data, stack_shape):
"""
    Train a stack of autoencoders layer by layer (greedy pre-training).
    :param in_data: input data; its shape should be [None, n], where n is a positive integer
    :param stack_shape: sizes of the layers in the stacked autoencoder. For example,
    for a 784-dimensional input (each MNIST image is 28x28=784), a first autoencoder
    with a 300-unit hidden layer and a second with 100 units is specified as
    stack_shape = [784, 300, 100]
    :return: a list containing every Autoencoder instance in the stack
"""
    # Define training parameters
training_epochs = 200
batch_size = 128
    # Initialize the list of stacked autoencoders
stacked_autoencoders = [Autoencoder(stack_shape[i], stack_shape[i + 1]) for i in range(len(stack_shape) - 1)]
    # Train the autoencoders layer by layer (greedy pre-training)
for i in range(len(stack_shape) - 1):
train_data = in_data if i == 0 else stacked_autoencoders[i - 1].transform(train_data)
for epoch in range(training_epochs):
for k in range(train_data.shape[0] // batch_size):
batch_xs = get_random_block_from_data(train_data, batch_size)
stacked_autoencoders[i].partial_fit(batch_xs)
            if epoch % 10 == 0:  # report progress for this epoch
test_cost = stacked_autoencoders[i].calc_total_cost(train_data)
print("[Encoder_{}] Epoch_{}, cost={}".format(i, epoch, test_cost))
return stacked_autoencoders
def train_MNIST(stacked_autoecoders: list):
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
w1 = tf.Variable(stacked_autoecoders[0].getWeights(), dtype=tf.float32)
b1 = tf.Variable(stacked_autoecoders[0].getBiases(), dtype=tf.float32)
w2 = tf.Variable(stacked_autoecoders[1].getWeights(), dtype=tf.float32)
b2 = tf.Variable(stacked_autoecoders[1].getBiases(), dtype=tf.float32)
w3 = tf.Variable(tf.zeros([100, 10]), dtype=tf.float32)
b3 = tf.Variable(tf.zeros([10]), dtype=tf.float32)
[encoder.close_session() for encoder in stacked_autoecoders]
h1 = tf.nn.softplus(tf.matmul(x, w1) + b1)
h2 = tf.nn.softplus(tf.matmul(h1, w2) + b2)
y = tf.nn.softmax(tf.matmul(h2, w3) + b3) # softmax layer
loss = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis=1))
train_step = tf.train.AdamOptimizer().minimize(loss)
    prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(5001):
batch_xs, batch_ys = mnist.train.next_batch(100)
train_loss, _ = sess.run([loss, train_step], feed_dict={x: batch_xs, y_: batch_ys})
if i % 100 == 0:
train_accuracy = sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys})
print("[step %d] train_loss=%.3f, train_accuracy=%.3f" % (i, train_loss, train_accuracy), end=" ")
test_accuracy, test_loss = sess.run([accuracy, loss],
feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                print("|| test_loss=%.3f, test_accuracy=%.3f" % (test_loss, test_accuracy))
if __name__ == '__main__':
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
sas = train_stacked_autoencoders(in_data=mnist.train.images, stack_shape=[784, 300, 100])
train_MNIST(sas)
|
the-stack_106_29457 | # coding=utf-8
# Copyright (c) DIRECT Contributors
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Taken from Detectron 2, licensed under Apache 2.0.
# https://github.com/facebookresearch/detectron2/blob/60d7a1fd33cc48e58968659cd3301f3300b2786b/detectron2/solver/lr_scheduler.py
# Changes:
# - Docstring to match the rest of the library.
# - Calls to other subroutines which do not exist in DIRECT.
# - Stylistic changes.
import math
from bisect import bisect_right
from typing import List
import torch
import logging
# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes
# only on epoch boundaries. We typically use iteration based schedules instead.
# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean
# "iteration" instead.
# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating
# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it.
class LRScheduler(torch.optim.lr_scheduler._LRScheduler): # noqa
def __init__(self, optimizer, last_epoch=-1, verbose=False):
super().__init__(optimizer, last_epoch, verbose)
self.logger = logging.getLogger(type(self).__name__)
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer or logger.
"""
state_dict = {key: value for key, value in self.__dict__.items() if key not in ["optimizer", "logger"]}
return state_dict
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
milestones: List[int],
gamma: float = 0.1,
warmup_factor: float = 0.001,
warmup_iterations: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
if not list(milestones) == sorted(milestones):
            raise ValueError(
                f"Milestones should be a list of increasing integers. Got {milestones}"
            )
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iterations = warmup_iterations
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method,
self.last_epoch,
self.warmup_iterations,
self.warmup_factor,
)
return [
base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
max_iters: int,
warmup_factor: float = 0.001,
warmup_iterations: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
self.max_iters = max_iters
self.warmup_factor = warmup_factor
self.warmup_iterations = warmup_iterations
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method,
self.last_epoch,
self.warmup_iterations,
self.warmup_factor,
)
# Different definitions of half-cosine with warmup are possible. For
# simplicity we multiply the standard half-cosine schedule by the warmup
# factor. An alternative is to start the period of the cosine at warmup_iterations
# instead of at 0. In the case that warmup_iterations << max_iters the two are
# very close to each other.
return [
base_lr * warmup_factor * 0.5 * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters))
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
def _get_warmup_factor_at_iter(method: str, iter: int, warmup_iters: int, warmup_factor: float) -> float:
"""
Return the learning rate warmup factor at a specific iteration.
Parameters
----------
method : str
Warmup method; either "constant" or "linear".
iter : int
Iteration at which to calculate the warmup factor.
warmup_iters : int
The length of the warmup phases.
warmup_factor : float
The base warmup factor (the meaning changes according to the method used).
Returns
-------
float: The effective warmup factor at the given iteration.
"""
if iter >= warmup_iters:
return 1.0
if method == "constant":
return warmup_factor
if method == "linear":
alpha = iter / warmup_iters
return warmup_factor * (1 - alpha) + alpha
raise ValueError(f"Unknown warmup method: {method}")
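# Minimal usage sketch (our own illustration, not part of the original module):
# attach a WarmupMultiStepLR to a toy model and step it once per iteration.
def _example_usage():  # pragma: no cover
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = WarmupMultiStepLR(
        optimizer, milestones=[300, 400], gamma=0.1, warmup_factor=0.001, warmup_iterations=100
    )
    for _ in range(500):
        optimizer.step()
        scheduler.step()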
|
the-stack_106_29458 | ##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
## \todo Support other list operations for child access
class SplitContainer( GafferUI.ContainerWidget ) :
Orientation = IECore.Enum.create( "Vertical", "Horizontal" )
def __init__( self, orientation=Orientation.Vertical, borderWidth=0, **kw ) :
GafferUI.ContainerWidget.__init__( self, _Splitter(), **kw )
self.__widgets = []
self.__handleWidgets = {}
self.__sizeAnimation = None
self._qtWidget().setContentsMargins( borderWidth, borderWidth, borderWidth, borderWidth )
self.setOrientation( orientation )
def getOrientation( self ) :
o = self._qtWidget().orientation()
if o == QtCore.Qt.Horizontal :
return self.Orientation.Horizontal
else :
return self.Orientation.Vertical
def setOrientation( self, orientation ) :
v = QtCore.Qt.Horizontal if orientation == self.Orientation.Horizontal else QtCore.Qt.Vertical
self._qtWidget().setOrientation( QtCore.Qt.Orientation( v ) )
def append( self, child ) :
oldParent = child.parent()
if oldParent is not None :
oldParent.removeChild( child )
self.__applySizePolicy( child )
self.__widgets.append( child )
self._qtWidget().addWidget( child._qtWidget() )
child._applyVisibility()
assert( child._qtWidget().parent() is self._qtWidget() )
def remove( self, child ) :
self.removeChild( child )
def insert( self, index, child ) :
assert( child not in self.__widgets )
oldParent = child.parent()
if oldParent :
oldParent.removeChild( child )
self.__applySizePolicy( child )
self.__widgets.insert( index, child )
self._qtWidget().insertWidget( index, child._qtWidget() )
child._applyVisibility()
assert( child._qtWidget().parent() is self._qtWidget() )
def index( self, child ) :
return self.__widgets.index( child )
def addChild( self, child ) :
self.append( child )
def removeChild( self, child ) :
assert( child in self.__widgets )
child._qtWidget().setSizePolicy( child.__originalSizePolicy )
child._qtWidget().setParent( None )
child._applyVisibility()
self.__widgets.remove( child )
## Returns a list of actual pixel sizes for each of the children.
# These do not include the space taken up by the handles.
def getSizes( self ) :
return self._qtWidget().sizes()
## Sets the sizes of the children. Note that this will not change
# the overall size of the SplitContainer - instead the sizes are
# adjusted to take up all the space available. Therefore it is only
# the relative differences in sizes which are important.
# If animationDuration is non-zero then it specifies a period
# in milliseconds over which to adjust the sizes.
def setSizes( self, sizes, animationDuration=0 ) :
assert( len( sizes ) == len( self ) )
if self.getOrientation() == self.Orientation.Vertical :
availableSize = self.size().y
else :
availableSize = self.size().x
if len( self ) > 1 :
availableSize -= (len( self ) - 1) * self._qtWidget().handleWidth()
scaleFactor = availableSize / sum( sizes )
sizes = [ scaleFactor * x for x in sizes ]
if animationDuration == 0 :
self._qtWidget().setSizes( sizes )
else :
animation = _SizeAnimation( self._qtWidget(), sizes )
animation.setDuration( animationDuration )
self.__sizeAnimation = animation
animation.start()
## If a size animation is currently in progress, then returns the
# final sizes of the animation, otherwise returns None.
def targetSizes( self ) :
if self.__sizeAnimation is not None :
if self.__sizeAnimation.state() == _SizeAnimation.Running :
return self.__sizeAnimation.targetSizes()
return None
## Returns the handle to the right/bottom of the specified child index.
# Note that you should not attempt to reparent the handles, and you will
# be unable to access them after the SplitContainer itself has been destroyed.
def handle( self, index ) :
if index < 0 or index >= len( self ) - 1 :
raise IndexError()
qtHandle = self._qtWidget().handle( index + 1 )
assert( qtHandle.parent() is self._qtWidget() )
handle = self.__handleWidgets.get( qtHandle, None )
if handle is None :
handle = GafferUI.Widget( qtHandle )
self.__handleWidgets[qtHandle] = handle
return handle
def __getitem__( self, index ) :
return self.__widgets[index]
def __delitem__( self, index ) :
if isinstance( index, slice ) :
indices = range( *(index.indices( len( self ) )) )
toRemove = []
for i in indices :
toRemove.append( self[i] )
for c in toRemove :
self.removeChild( c )
else :
self.removeChild( self[index] )
def __len__( self ) :
return len( self.__widgets )
def __applySizePolicy( self, widget ) :
# this size policy allows the children to be cropped to any size - otherwise some stubbornly
# stay at a minimum size and then suddenly collapse to nothing when moving the splitter all
# the way. we store the original size policy on the widget and reapply it in removeChild().
widget.__originalSizePolicy = widget._qtWidget().sizePolicy()
widget._qtWidget().setSizePolicy( QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored )
# We inherit from QSplitter purely so that the handles can be created
# in Python rather than C++. This seems to help PyQt and PySide in tracking
# the lifetimes of the splitter and handles.
class _Splitter( QtGui.QSplitter ) :
def __init__( self ) :
QtGui.QSplitter.__init__( self )
# There seems to be an odd interaction between this and the stylesheet, and
# setting this to the desired size and then using the stylesheet to divide it into
# margins seems the only reliable way of sizing the handle.
## \todo This should come from the style once we've unified the Gadget and Widget
# styling.
self.setHandleWidth( 6 )
def createHandle( self ) :
return QtGui.QSplitterHandle( self.orientation(), self )
class _SizeAnimation( QtCore.QVariantAnimation ) :
def __init__( self, qSplitter, newSizes ) :
QtCore.QVariantAnimation.__init__( self, None )
self.__splitter = qSplitter
self.sizes = zip( qSplitter.sizes(), newSizes )
self.setStartValue( 0.0 )
self.setEndValue( 1.0 )
self.setEasingCurve( QtCore.QEasingCurve( QtCore.QEasingCurve.OutCubic ) )
def targetSizes( self ) :
return ( self.sizes[0][1], self.sizes[1][1] )
def updateCurrentValue( self, value ) :
value = GafferUI._Variant.fromVariant( value )
sizes = [ x[0] + ( x[1] - x[0] ) * value for x in self.sizes ]
self.__splitter.setSizes( sizes )
|
the-stack_106_29459 | import pygame
import random
pygame.init()
# Colors
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 190, 0)
# Creating window
screen_width = 900
screen_height = 600
gameWindow = pygame.display.set_mode((screen_width, screen_height))
# Game Title
pygame.display.set_caption("Snake King")
pygame.display.update()
# Game specific variables
exit_game = False
game_over = False
snake_x = 44
snake_y = 55
velocity_x = 0
velocity_y = 0
food_x = random.randint(10, screen_width/2)
food_y = random.randint(10, screen_height/2)
score = 0
snake_size = 25
fps = 30
clock = pygame.time.Clock()
font = pygame.font.SysFont(None,55)
def text_screen(text, color,x,y):
screen_text = font.render(text, True,color)
gameWindow.blit(screen_text,[x,y])
def plot_snake(gameWindow,color, snk_list,snake_size):
for x,y in snk_list:
pygame.draw.rect(gameWindow, color, [x, y, snake_size, snake_size])
snk_list = []
snk_length = 1
# Game Loop
while not exit_game:
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
velocity_x = 7
velocity_y = 0
if event.key == pygame.K_LEFT:
velocity_x = - 7
velocity_y = 0
if event.key == pygame.K_UP:
velocity_y = - 7
velocity_x = 0
if event.key == pygame.K_DOWN:
velocity_y = 7
velocity_x = 0
snake_x = snake_x + velocity_x
snake_y = snake_y + velocity_y
if abs(snake_x - food_x)<14.2 and abs(snake_y - food_y)<14.2:
score +=10
        food_x = random.randint(10, screen_width // 2)
        food_y = random.randint(10, screen_height // 2)
snk_length += 5
gameWindow.fill(white)
text_screen("Score: "+ str(score), red,5,5)
pygame.draw.rect(gameWindow, red, [food_x, food_y, snake_size, snake_size])
head = []
head.append(snake_x)
head.append(snake_y)
snk_list.append(head)
if len(snk_list)>snk_length:
del snk_list[0]
# pygame.draw.rect(gameWindow, green, [snake_x, snake_y, snake_size, snake_size])
plot_snake(gameWindow,green, snk_list,snake_size)
pygame.display.update()
clock.tick(fps)
pygame.quit()
quit()
|
the-stack_106_29460 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNotebook(PythonPackage):
"""Jupyter Interactive Notebook"""
homepage = "https://github.com/jupyter/notebook"
url = "https://pypi.io/packages/source/n/notebook/notebook-4.2.3.tar.gz"
version('6.0.1', sha256='660976fe4fe45c7aa55e04bf4bccb9f9566749ff637e9020af3422f9921f9a5d')
version('4.2.3', sha256='39a9603d3fe88b60de2903680c965cf643acf2c16fb2c6bac1d905e1042b5851')
version('4.2.2', sha256='418ba230c9b2e7e739940cae9fb4625e10a63f038e9c95cf1a9b7a244256ba38')
version('4.2.1', sha256='a49de524dabb99f214bdf2a58f26c7892650251a23a3669c6492fb180492e197')
version('4.2.0', sha256='e10c4916c77b48394796b5b1440d61d7b210f9941194048fe20ef88948016d84')
version('4.1.0', sha256='b597437ba33538221008e21fea71cd01eda9da1515ca3963d7c74e44f4b03d90')
version('4.0.6', sha256='f62e7a6afbc00bab3615b927595d27b1874cff3218bddcbab62f97f6dae567c3')
version('4.0.4', sha256='a57852514bce1b1cf41fa0311f6cf894960cf68b083b55e6c408316b598d5648')
version('4.0.2', sha256='8478d7e2ab474855b0ff841f693983388af8662d3af1adcb861acb900274f22a')
depends_on('[email protected]:2.8,3.3:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='@6:')
depends_on('py-setuptools', type='build', when='@6:')
depends_on('py-jinja2', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-ipython-genutils', type=('build', 'run'))
depends_on('py-traitlets', type=('build', 'run'))
depends_on('py-jupyter-core', type=('build', 'run'))
depends_on('py-jupyter-client', type=('build', 'run'))
depends_on('py-nbformat', type=('build', 'run'))
depends_on('py-nbconvert', type=('build', 'run'))
depends_on('py-ipykernel', type=('build', 'run'))
depends_on('[email protected]:', when='@4.2.0:', type=('build', 'run'))
depends_on('py-ipywidgets', when="+terminal", type=('build', 'run'))
# required for 6.0.1, not sure about 5.x.x, as there is no version in spack
# right now treat them as 6-or-newer dependencies
depends_on('py-prometheus-client', type=('build', 'run'), when='@6:')
depends_on('py-terminado', type=('build', 'run'), when='@6:')
depends_on('py-send2trash', type=('build', 'run'), when='@6:')
depends_on('py-pyzmq@17:', type=('build', 'run'), when='@6:')
depends_on('[email protected]:', type=('build', 'run'), when='@6:')
depends_on('[email protected]:', type=('build', 'run'), when='@6:')
depends_on('[email protected]:', type=('build', 'run'), when='@6:')
depends_on('[email protected]:', type=('build', 'run'), when='@6:')
depends_on('py-ipaddress', type=('build', 'run'), when='@6: ^python@:2.8')
|
the-stack_106_29461 | import numpy as np
import io
import soundfile as sf
def syncToVideo(video, audio):
"""
    :arg video: WAV bytes extracted from the video
    :arg audio: the enhanced audio track (WAV bytes)
    :returns: (video_offset, audio_offset), the offsets (in 0.1 s steps) needed to sync the two tracks
"""
# step 1 -> extract numpy arrays from the two audio
video_data, video_samplerate = sf.read(io.BytesIO(video))
# print(video_data, video_data.shape[0], video_samplerate, flush=True)
audio_data, audio_samplerate = sf.read(io.BytesIO(audio))
# print(audio_data, audio_data.shape[0], audio_samplerate, flush=True)
# step 2
# overlay the two audio by using the window between them
# we approximate the results with 4 frames per second
def scale_signal(signal):
new_signal = np.copy(signal)
m, M = np.min(new_signal), np.max(new_signal)
new_signal = new_signal / (M - m)
return new_signal
signal1 = scale_signal(video_data)
signal2 = scale_signal(audio_data)
max_window = int(np.abs(video_data.shape[0] - audio_data.shape[0]) * 1.618)
print(max_window, flush=True)
# we scale down to 10 samples per second
down_factor = audio_samplerate // 10
max_window //= down_factor
scaled1 = scale_signal(
np.array([
np.mean(signal1[i:i+down_factor, 0]) for i in range(0, signal1.shape[0] // down_factor * down_factor, down_factor)
])
)
scaled2 = scale_signal(
np.array([
np.mean(signal2[i:i + down_factor, 0]) for i in range(0, signal2.shape[0] // down_factor * down_factor, down_factor)
])
)
np.save('scaled1.npy', scaled1)
np.save('scaled2.npy', scaled2)
offset1 = np.zeros(max_window)
offset2 = np.zeros(max_window)
min_shape = min(scaled1.shape[0], scaled2.shape[0])
# offset for 1:
for offset in range(max_window):
for i in range(min_shape - max_window - 1):
offset1[offset] += np.sum(np.abs(scaled1[i + offset] - scaled2[i]))
print(f'{offset} out of {max_window - 1} -> ', offset1[offset])
# offset for 2:
for offset in range(max_window):
for i in range(min_shape - max_window - 1):
offset2[offset] += np.sum(np.abs(scaled1[i] - scaled2[i + offset]))
print(f'{offset} out of {max_window - 1} -> ', offset2[offset])
print(np.min(offset1), np.min(offset2), flush=True)
video_offset, audio_offset = 0, 0
for i, offset in enumerate(offset1):
if offset < offset1[video_offset]:
video_offset = i
for i, offset in enumerate(offset2):
if offset < offset2[audio_offset]:
audio_offset = i
np.save('offset1.npy', offset1)
np.save('offset2.npy', offset2)
return video_offset, audio_offset
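# Illustrative usage sketch (added; not part of the original module). The file names are
# placeholders: both arguments are expected to be raw WAV bytes, e.g. read from disk.
if __name__ == "__main__":
    with open("video_audio.wav", "rb") as f_video, open("enhanced.wav", "rb") as f_audio:
        video_offset, audio_offset = syncToVideo(f_video.read(), f_audio.read())
    print("video offset:", video_offset, "audio offset:", audio_offset)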
|
the-stack_106_29462 | """Support for Latticework AmberOS sensors."""
from __future__ import annotations
from datetime import datetime, timedelta
from typing import Any
import logging
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.config_entries import ConfigEntry
from homeassistant.components.sensor import SensorEntity
from homeassistant.util.dt import utcnow
from homeassistant.const import (
CONF_DISKS,
DATA_MEGABYTES,
DATA_GIGABYTES,
DATA_RATE_KILOBYTES_PER_SECOND,
DATA_TERABYTES,
)
from . import AmberOSApi, LwAmberOSBaseEntity, LwAmberOSDeviceEntity
from .const import (
AMBEROS_API,
COORDINATOR_CENTRAL,
CONF_VOLUMES,
DOMAIN,
ENTITY_UNIT_LOAD,
INFORMATION_SENSORS,
STORAGE_DISK_SENSORS,
STORAGE_VOL_SENSORS,
UTILISATION_SENSORS,
AmberOSSensorEntityDescription,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the Latticework AmberOS Sensor."""
data = hass.data[DOMAIN][entry.unique_id]
api: AmberOSApi = data[AMBEROS_API]
coordinator = data[COORDINATOR_CENTRAL]
try:
entities: list[AmberOSUtilSensor | AmberOSStorageSensor | AmberOSInfoSensor] = [
AmberOSUtilSensor(api, coordinator, description)
for description in UTILISATION_SENSORS
]
# Handle all volumes
if api.storage.volumes_ids:
entities.extend(
[
AmberOSStorageSensor(api, coordinator, description, volume)
for volume in entry.data.get(CONF_VOLUMES, api.storage.volumes_ids)
for description in STORAGE_VOL_SENSORS
]
)
# Handle all disks
if api.storage.disks_ids:
entities.extend(
[
AmberOSStorageSensor(api, coordinator, description, disk)
for disk in entry.data.get(CONF_DISKS, api.storage.disks_ids)
for description in STORAGE_DISK_SENSORS
]
)
entities.extend(
[
AmberOSInfoSensor(api, coordinator, description)
for description in INFORMATION_SENSORS
]
)
async_add_entities(entities)
except AttributeError as e:
_LOGGER.error(e)
class AmberOSSensor(LwAmberOSBaseEntity, SensorEntity):
    """Mixin for sensor specific attributes."""
entity_description: AmberOSSensorEntityDescription
def __init__(
self,
api: AmberOSApi,
coordinator: DataUpdateCoordinator[dict[str, dict[str, Any]]],
description: AmberOSSensorEntityDescription,
) -> None:
"""Initialize the AmberOS sensor entity."""
super().__init__(api, coordinator, description)
class AmberOSUtilSensor(AmberOSSensor):
"""Representation a AmberOS Utilisation sensor."""
@property
def native_value(self) -> Any | None:
"""Return the state."""
attr = getattr(self._api.utilisation, self.entity_description.key)
if callable(attr):
attr = attr()
if attr is None:
return None
# Data (RAM)
if self.native_unit_of_measurement == DATA_MEGABYTES:
return round(attr / 1024.0 ** 2, 1)
# Network
if self.native_unit_of_measurement == DATA_RATE_KILOBYTES_PER_SECOND:
return round(attr / 1024.0, 1)
# CPU load average
if self.native_unit_of_measurement == ENTITY_UNIT_LOAD:
return round(attr / 100, 2)
return attr
@property
def available(self) -> bool:
"""Return True if entity is available."""
return bool(self._api.utilisation)
class AmberOSStorageSensor(LwAmberOSDeviceEntity, AmberOSSensor):
"""Representation a AmberOS Storage sensor."""
entity_description: AmberOSSensorEntityDescription
def __init__(
self,
api: AmberOSApi,
coordinator: DataUpdateCoordinator[dict[str, dict[str, Any]]],
description: AmberOSSensorEntityDescription,
device_id: str | None = None,
    ) -> None:
        """Initialize the AmberOS storage sensor entity."""
super().__init__(api, coordinator, description, device_id)
@property
def native_value(self) -> Any | None:
"""Return the state."""
attr = getattr(self._api.storage, self.entity_description.key)(self._device_id)
if attr is None:
return None
# Data (disk space)
if self.native_unit_of_measurement == DATA_TERABYTES:
return round(attr / 1024.0 ** 4, 2)
if self.native_unit_of_measurement == DATA_GIGABYTES:
return round(attr / 1024.0 ** 3, 2)
return attr
@property
def extra_state_attributes(self) -> dict[str, str]:
"""Return storage details."""
attr = self._attr_extra_state_attributes
if self.entity_description.key == "disk_status":
attr.update(getattr(self._api.storage, "get_disk")(self._device_id))
if self.entity_description.key == "volume_status":
attr.update(getattr(self._api.storage, "get_volume")(self._device_id))
return attr
class AmberOSInfoSensor(AmberOSSensor):
"""Representation a AmberOS information sensor."""
def __init__(
self,
api: AmberOSApi,
coordinator: DataUpdateCoordinator[dict[str, dict[str, Any]]],
description: AmberOSSensorEntityDescription,
) -> None:
"""Initialize the AmberOS Info entity."""
super().__init__(api, coordinator, description)
self._previous_uptime: str | None = None
self._last_boot: datetime | None = None
@property
def native_value(self) -> Any | None:
"""Return the state."""
attr = getattr(self._api.information, self.entity_description.key)
if attr is None:
return None
return attr
@property
def extra_state_attributes(self) -> dict[str, str]:
"""Return info details."""
attr = self._attr_extra_state_attributes
if "status" in self.entity_description.key:
attr.update(self._api.backup.status_by_check)
return attr
|
the-stack_106_29463 | import asyncio
import logging
import sys
import traceback
import warnings
from kafka.partitioner.default import DefaultPartitioner
from kafka.codec import has_gzip, has_snappy, has_lz4, has_zstd
from aiokafka.client import AIOKafkaClient
from aiokafka.errors import (
MessageSizeTooLargeError, UnsupportedVersionError, IllegalOperation)
from aiokafka.record.legacy_records import LegacyRecordBatchBuilder
from aiokafka.structs import TopicPartition
from aiokafka.util import (
INTEGER_MAX_VALUE, commit_structure_validate, create_task, get_running_loop
)
from .message_accumulator import MessageAccumulator
from .sender import Sender
from .transaction_manager import TransactionManager
log = logging.getLogger(__name__)
_missing = object()
class AIOKafkaProducer(object):
"""A Kafka client that publishes records to the Kafka cluster.
The producer consists of a pool of buffer space that holds records that
haven't yet been transmitted to the server as well as a background task
that is responsible for turning these records into requests and
transmitting them to the cluster.
The send() method is asynchronous. When called it adds the record to a
buffer of pending record sends and immediately returns. This allows the
producer to batch together individual records for efficiency.
The 'acks' config controls the criteria under which requests are considered
complete. The "all" setting will result in waiting for all replicas to
respond, the slowest but most durable setting.
The key_serializer and value_serializer instruct how to turn the key and
value objects the user provides into bytes.
Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the producer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client.
Default: 'aiokafka-producer-#' (appended with a unique number
per instance)
key_serializer (callable): used to convert user-supplied keys to bytes
If not None, called as f(key), should return bytes. Default: None.
value_serializer (callable): used to convert user-supplied message
values to bytes. If not None, called as f(value), should return
bytes. Default: None.
acks (0, 1, 'all'): The number of acknowledgments the producer requires
the leader to have received before considering a request complete.
This controls the durability of records that are sent. The
following settings are common:
0: Producer will not wait for any acknowledgment from the server
at all. The message will immediately be added to the socket
buffer and considered sent. No guarantee can be made that the
server has received the record in this case, and the retries
configuration will not take effect (as the client won't
generally know of any failures). The offset given back for each
record will always be set to -1.
1: The broker leader will write the record to its local log but
will respond without awaiting full acknowledgement from all
followers. In this case should the leader fail immediately
after acknowledging the record but before the followers have
replicated it then the record will be lost.
all: The broker leader will wait for the full set of in-sync
replicas to acknowledge the record. This guarantees that the
record will not be lost as long as at least one in-sync replica
remains alive. This is the strongest available guarantee.
If unset, defaults to *acks=1*. If ``enable_idempotence`` is
            ``True``, it defaults to *acks=all*
compression_type (str): The compression type for all data generated by
the producer. Valid values are 'gzip', 'snappy', 'lz4', 'zstd', or
None. Compression is of full batches of data, so the efficacy of
batching will also impact the compression ratio (more batching
means better compression). Default: None.
max_batch_size (int): Maximum size of buffered data per partition.
After this amount `send` coroutine will block until batch is
drained.
Default: 16384
linger_ms (int): The producer groups together any records that arrive
in between request transmissions into a single batched request.
Normally this occurs only under load when records arrive faster
than they can be sent out. However in some circumstances the client
may want to reduce the number of requests even under moderate load.
This setting accomplishes this by adding a small amount of
artificial delay; that is, if first request is processed faster,
than `linger_ms`, producer will wait `linger_ms - process_time`.
This setting defaults to 0 (i.e. no delay).
partitioner (callable): Callable used to determine which partition
each message is assigned to. Called (after key serialization):
partitioner(key_bytes, all_partitions, available_partitions).
The default partitioner implementation hashes each non-None key
using the same murmur2 algorithm as the Java client so that
messages with the same key are assigned to the same partition.
When a key is None, the message is delivered to a random partition
(filtered to partitions with available leaders only, if possible).
max_request_size (int): The maximum size of a request. This is also
effectively a cap on the maximum record size. Note that the server
has its own cap on record size which may be different from this.
This setting will limit the number of record batches the producer
will send in a single request to avoid sending huge requests.
Default: 1048576.
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
request_timeout_ms (int): Produce request timeout in milliseconds.
As it's sent as part of ProduceRequest (it's a blocking call),
maximum waiting time can be up to 2 * request_timeout_ms.
Default: 40000.
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
api_version (str): specify which kafka API version to use.
If set to 'auto', will attempt to infer the broker version by
probing various APIs. Default: auto
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
socket connections. Directly passed into asyncio's
`create_connection`_. For more information see :ref:`ssl_auth`.
Default: None.
connections_max_idle_ms (int): Close idle connections after the number
of milliseconds specified by this config. Specifying `None` will
disable idle checks. Default: 540000 (9 minutes).
enable_idempotence (bool): When set to ``True``, the producer will
ensure that exactly one copy of each message is written in the
stream. If ``False``, producer retries due to broker failures,
etc., may write duplicates of the retried message in the stream.
            Note that enabling idempotence requires ``acks`` to be set to 'all'. If it
            is not explicitly set by the user it will be chosen automatically. If incompatible
values are set, a ``ValueError`` will be thrown.
New in version 0.5.0.
sasl_mechanism (str): Authentication mechanism when security_protocol
is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
PLAIN, GSSAPI, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER.
Default: PLAIN
sasl_plain_username (str): username for sasl PLAIN authentication.
Default: None
sasl_plain_password (str): password for sasl PLAIN authentication.
Default: None
sasl_oauth_token_provider (kafka.oauth.abstract.AbstractTokenProvider):
OAuthBearer token provider instance. (See kafka.oauth.abstract).
Default: None
Note:
Many configuration parameters are taken from the Java client:
https://kafka.apache.org/documentation.html#producerconfigs
"""
_PRODUCER_CLIENT_ID_SEQUENCE = 0
_COMPRESSORS = {
'gzip': (has_gzip, LegacyRecordBatchBuilder.CODEC_GZIP),
'snappy': (has_snappy, LegacyRecordBatchBuilder.CODEC_SNAPPY),
'lz4': (has_lz4, LegacyRecordBatchBuilder.CODEC_LZ4),
'zstd': (has_zstd, LegacyRecordBatchBuilder.CODEC_ZSTD)
}
_closed = None # Serves as an uninitialized flag for __del__
_source_traceback = None
def __init__(self, *, loop=None, bootstrap_servers='localhost',
client_id=None,
metadata_max_age_ms=300000, request_timeout_ms=40000,
api_version='auto', acks=_missing,
key_serializer=None, value_serializer=None,
compression_type=None, max_batch_size=16384,
partitioner=DefaultPartitioner(), max_request_size=1048576,
linger_ms=0, send_backoff_ms=100,
retry_backoff_ms=100, security_protocol="PLAINTEXT",
ssl_context=None, connections_max_idle_ms=540000,
enable_idempotence=False, transactional_id=None,
transaction_timeout_ms=60000, sasl_mechanism="PLAIN",
sasl_plain_password=None, sasl_plain_username=None,
sasl_kerberos_service_name='kafka',
sasl_kerberos_domain_name=None,
sasl_oauth_token_provider=None):
if loop is None:
loop = get_running_loop()
else:
warnings.warn("The loop argument is deprecated since 0.7.1, "
"and scheduled for removal in 0.8.0",
DeprecationWarning, stacklevel=2)
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
self._loop = loop
if acks not in (0, 1, -1, 'all', _missing):
raise ValueError("Invalid ACKS parameter")
if compression_type not in ('gzip', 'snappy', 'lz4', 'zstd', None):
raise ValueError("Invalid compression type!")
if compression_type:
checker, compression_attrs = self._COMPRESSORS[compression_type]
if not checker():
raise RuntimeError(
f"Compression library for {compression_type} not found")
else:
compression_attrs = 0
if transactional_id is not None:
enable_idempotence = True
else:
transaction_timeout_ms = INTEGER_MAX_VALUE
if enable_idempotence:
if acks is _missing:
acks = -1
elif acks not in ('all', -1):
raise ValueError(
f"acks={acks} not supported if enable_idempotence=True")
self._txn_manager = TransactionManager(
transactional_id, transaction_timeout_ms)
else:
self._txn_manager = None
if acks is _missing:
acks = 1
elif acks == 'all':
acks = -1
AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE += 1
if client_id is None:
client_id = f'aiokafka-producer-' \
f'{AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE}'
self._key_serializer = key_serializer
self._value_serializer = value_serializer
self._compression_type = compression_type
self._partitioner = partitioner
self._max_request_size = max_request_size
self._request_timeout_ms = request_timeout_ms
self.client = AIOKafkaClient(
loop=loop, bootstrap_servers=bootstrap_servers,
client_id=client_id, metadata_max_age_ms=metadata_max_age_ms,
request_timeout_ms=request_timeout_ms,
retry_backoff_ms=retry_backoff_ms,
api_version=api_version, security_protocol=security_protocol,
ssl_context=ssl_context,
connections_max_idle_ms=connections_max_idle_ms,
sasl_mechanism=sasl_mechanism,
sasl_plain_username=sasl_plain_username,
sasl_plain_password=sasl_plain_password,
sasl_kerberos_service_name=sasl_kerberos_service_name,
sasl_kerberos_domain_name=sasl_kerberos_domain_name,
sasl_oauth_token_provider=sasl_oauth_token_provider)
self._metadata = self.client.cluster
self._message_accumulator = MessageAccumulator(
self._metadata, max_batch_size, compression_attrs,
self._request_timeout_ms / 1000, txn_manager=self._txn_manager,
loop=loop)
self._sender = Sender(
self.client, acks=acks, txn_manager=self._txn_manager,
retry_backoff_ms=retry_backoff_ms, linger_ms=linger_ms,
message_accumulator=self._message_accumulator,
request_timeout_ms=request_timeout_ms)
self._closed = False
# Warn if producer was not closed properly
# We don't attempt to close the Consumer, as __del__ is synchronous
def __del__(self, _warnings=warnings):
if self._closed is False:
_warnings.warn(f"Unclosed AIOKafkaProducer {self!r}",
ResourceWarning,
source=self)
context = {'producer': self,
'message': 'Unclosed AIOKafkaProducer'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
async def start(self):
"""Connect to Kafka cluster and check server version"""
assert self._loop is get_running_loop(), (
"Please create objects with the same loop as running with"
)
log.debug("Starting the Kafka producer") # trace
await self.client.bootstrap()
if self._compression_type == 'lz4':
assert self.client.api_version >= (0, 8, 2), \
'LZ4 Requires >= Kafka 0.8.2 Brokers'
if self._txn_manager is not None and self.client.api_version < (0, 11):
raise UnsupportedVersionError(
"Idempotent producer available only for Broker version 0.11"
" and above")
await self._sender.start()
self._message_accumulator.set_api_version(self.client.api_version)
self._producer_magic = 0 if self.client.api_version < (0, 10) else 1
log.debug("Kafka producer started")
async def flush(self):
"""Wait until all batches are Delivered and futures resolved"""
await self._message_accumulator.flush()
async def stop(self):
"""Flush all pending data and close all connections to kafka cluster"""
if self._closed:
return
self._closed = True
# If the sender task is down there is no way for accumulator to flush
if self._sender is not None and self._sender.sender_task is not None:
await asyncio.wait([
create_task(self._message_accumulator.close()),
self._sender.sender_task],
return_when=asyncio.FIRST_COMPLETED)
await self._sender.close()
await self.client.close()
log.debug("The Kafka producer has closed.")
async def partitions_for(self, topic):
"""Returns set of all known partitions for the topic."""
return await self.client._wait_on_metadata(topic)
def _serialize(self, topic, key, value):
serialized_key = self._key_serializer(key) if self._key_serializer else key
if self._value_serializer:
serialized_value = self._value_serializer(value)
else:
serialized_value = value
message_size = LegacyRecordBatchBuilder.record_overhead(
self._producer_magic)
if serialized_key is not None:
message_size += len(serialized_key)
if serialized_value is not None:
message_size += len(serialized_value)
if message_size > self._max_request_size:
raise MessageSizeTooLargeError(
f"The message is {message_size:d} bytes when serialized "
f"which is larger than the maximum request size you have "
f"configured with the max_request_size configuration")
return serialized_key, serialized_value
def _partition(self, topic, partition, key, value,
serialized_key, serialized_value):
if partition is not None:
assert partition >= 0
assert partition in self._metadata.partitions_for_topic(topic), \
'Unrecognized partition'
return partition
all_partitions = list(self._metadata.partitions_for_topic(topic))
available = list(self._metadata.available_partitions_for_topic(topic))
return self._partitioner(
serialized_key, all_partitions, available)
async def send(
self, topic, value=None, key=None, partition=None,
timestamp_ms=None, headers=None
):
"""Publish a message to a topic.
Arguments:
topic (str): topic where the message will be published
value (optional): message value. Must be type bytes, or be
serializable to bytes via configured value_serializer. If value
is None, key is required and message acts as a 'delete'.
See kafka compaction documentation for more details:
http://kafka.apache.org/documentation.html#compaction
(compaction requires kafka >= 0.8.1)
partition (int, optional): optionally specify a partition. If not
set, the partition will be selected using the configured
'partitioner'.
key (optional): a key to associate with the message. Can be used to
determine which partition to send the message to. If partition
is None (and producer's partitioner config is left as default),
then messages with the same key will be delivered to the same
partition (but if key is None, partition is chosen randomly).
Must be type bytes, or be serializable to bytes via configured
key_serializer.
timestamp_ms (int, optional): epoch milliseconds (from Jan 1 1970
UTC) to use as the message timestamp. Defaults to current time.
headers (optional): Kafka headers to be included in the message using
the format [("key", b"value")]. Iterable of tuples where key is a
normal string and value is a byte string.
Returns:
asyncio.Future: object that will be set when message is
processed
Raises:
kafka.KafkaTimeoutError: if we can't schedule this record (
pending buffer is full) in up to `request_timeout_ms`
milliseconds.
Note:
The returned future will wait based on `request_timeout_ms`
setting. Cancelling the returned future **will not** stop event
from being sent, but cancelling the ``send`` coroutine itself
**will**.
"""
assert value is not None or self.client.api_version >= (0, 8, 1), (
'Null messages require kafka >= 0.8.1')
assert value is not None or key is not None, 'Need at least one: key or value'
# first make sure the metadata for the topic is available
await self.client._wait_on_metadata(topic)
# Ensure transaction is started and not committing
if self._txn_manager is not None:
txn_manager = self._txn_manager
if txn_manager.transactional_id is not None and \
not self._txn_manager.is_in_transaction():
raise IllegalOperation(
"Can't send messages while not in transaction")
if headers is not None:
if self.client.api_version < (0, 11):
raise UnsupportedVersionError(
"Headers not supported before Kafka 0.11")
else:
# Record parser/builder support only list type, no explicit None
headers = []
key_bytes, value_bytes = self._serialize(topic, key, value)
partition = self._partition(topic, partition, key, value,
key_bytes, value_bytes)
tp = TopicPartition(topic, partition)
log.debug("Sending (key=%s value=%s) to %s", key, value, tp)
fut = await self._message_accumulator.add_message(
tp, key_bytes, value_bytes, self._request_timeout_ms / 1000,
timestamp_ms=timestamp_ms, headers=headers)
return fut
async def send_and_wait(
self, topic, value=None, key=None, partition=None,
timestamp_ms=None, headers=None
):
"""Publish a message to a topic and wait the result"""
future = await self.send(
topic, value, key, partition, timestamp_ms, headers)
return await future
def create_batch(self):
"""Create and return an empty BatchBuilder.
The batch is not queued for send until submission to ``send_batch``.
Returns:
BatchBuilder: empty batch to be filled and submitted by the caller.
"""
return self._message_accumulator.create_builder()
async def send_batch(self, batch, topic, *, partition):
"""Submit a BatchBuilder for publication.
Arguments:
batch (BatchBuilder): batch object to be published.
topic (str): topic where the batch will be published.
partition (int): partition where this batch will be published.
Returns:
asyncio.Future: object that will be set when the batch is
delivered.
"""
# first make sure the metadata for the topic is available
await self.client._wait_on_metadata(topic)
# We only validate we have the partition in the metadata here
partition = self._partition(topic, partition, None, None, None, None)
# Ensure transaction is started and not committing
if self._txn_manager is not None:
txn_manager = self._txn_manager
if txn_manager.transactional_id is not None and \
not self._txn_manager.is_in_transaction():
raise IllegalOperation(
"Can't send messages while not in transaction")
tp = TopicPartition(topic, partition)
log.debug("Sending batch to %s", tp)
future = await self._message_accumulator.add_batch(
batch, tp, self._request_timeout_ms / 1000)
return future
def _ensure_transactional(self):
if self._txn_manager is None or \
self._txn_manager.transactional_id is None:
raise IllegalOperation(
"You need to configure transaction_id to use transactions")
async def begin_transaction(self):
self._ensure_transactional()
log.debug(
"Beginning a new transaction for id %s",
self._txn_manager.transactional_id)
await asyncio.shield(
self._txn_manager.wait_for_pid()
)
self._txn_manager.begin_transaction()
async def commit_transaction(self):
self._ensure_transactional()
log.debug(
"Committing transaction for id %s",
self._txn_manager.transactional_id)
self._txn_manager.committing_transaction()
await asyncio.shield(
self._txn_manager.wait_for_transaction_end(),
)
async def abort_transaction(self):
self._ensure_transactional()
log.debug(
"Aborting transaction for id %s",
self._txn_manager.transactional_id)
self._txn_manager.aborting_transaction()
await asyncio.shield(
self._txn_manager.wait_for_transaction_end(),
)
def transaction(self):
return TransactionContext(self)
async def send_offsets_to_transaction(self, offsets, group_id):
self._ensure_transactional()
if not self._txn_manager.is_in_transaction():
raise IllegalOperation("Not in the middle of a transaction")
if not group_id or not isinstance(group_id, str):
raise ValueError(group_id)
# validate `offsets` structure
formatted_offsets = commit_structure_validate(offsets)
log.debug(
"Begin adding offsets %s for consumer group %s to transaction",
formatted_offsets, group_id)
fut = self._txn_manager.add_offsets_to_txn(formatted_offsets, group_id)
await asyncio.shield(fut)
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
class TransactionContext:
def __init__(self, producer):
self._producer = producer
async def __aenter__(self):
await self._producer.begin_transaction()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
# If called directly we want the API to raise a InvalidState error,
# but when exiting a context manager we should just let it out
if self._producer._txn_manager.is_fatal_error():
return
await self._producer.abort_transaction()
else:
await self._producer.commit_transaction()
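# Illustrative usage sketch (added; not part of the original module). It only exercises the
# public methods defined above; the broker address and topic name are placeholders.
if __name__ == "__main__":
    async def _demo():
        producer = AIOKafkaProducer(bootstrap_servers="localhost:9092")
        await producer.start()
        try:
            await producer.send_and_wait("demo-topic", b"hello", key=b"key-1")
        finally:
            await producer.stop()
    asyncio.run(_demo())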
|
the-stack_106_29464 | """Date: 2019/12/24
Author: [email protected]
Change log:
Important notes:
"""
import json
import logging
import os
import threading
from django.conf import settings
import django
import pika
from utils import config
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wol_server.settings")
django.setup()
logger = logging.getLogger(settings.ELK_APP_NAME)
CREDENTIALS = pika.PlainCredentials(config.RABBITMQ_USERNAME, config.RABBITMQ_PASSWORD)
PARAMETERS = pika.ConnectionParameters(config.RABBITMQ_HOST, config.RABBITMQ_PORT, config.RABBITMQ_VHOST,
CREDENTIALS, socket_timeout=3)
class RabbitMQConnection(threading.Thread):
    """RabbitMQ connection and message handling class.
"""
SINGLETON_CLIENT_CONNECTION = None
SINGLETON_CLIENT_CHANNEL = None
IN_CONNECT = False # flag
LOCK = threading.Lock()
def __init__(self):
threading.Thread.__init__(self)
@classmethod
    def _reconnect(cls):
        """Connecting to RabbitMQ synchronously blocks (and blocking makes the system switch threads), so this
        function must be thread-safe when called from multiple threads. While a connection attempt is already
        in progress, other threads should not try to connect again but raise an exception immediately.
Returns:
Raises:
            InConnectionException: connection error
"""
with cls.LOCK:
if not cls.IN_CONNECT and (not cls.SINGLETON_CLIENT_CONNECTION or
cls.SINGLETON_CLIENT_CONNECTION.is_closed or
not cls.SINGLETON_CLIENT_CHANNEL or
cls.SINGLETON_CLIENT_CHANNEL.is_closed):
cls.IN_CONNECT = True
dispatch = "do_connect"
else:
dispatch = "raise_exception"
if dispatch == "do_connect":
try:
cls.SINGLETON_CLIENT_CONNECTION = pika.BlockingConnection(PARAMETERS)
cls.SINGLETON_CLIENT_CHANNEL = cls.SINGLETON_CLIENT_CONNECTION.channel()
finally:
                # Only guarantee here that IN_CONNECT is reset to False; exceptions are handled by the caller
with cls.LOCK:
cls.IN_CONNECT = False
else:
raise InConnectionException()
@classmethod
    def _async_reconnect(cls):
        """Compared with the synchronous connection, an asynchronous connection avoids blocking for too long
        when RabbitMQ itself is unresponsive, which would otherwise affect the business logic of the system.
Returns:
Raises:
            InConnectionException: connection error
"""
with cls.LOCK:
if not cls.IN_CONNECT and (not cls.SINGLETON_CLIENT_CONNECTION or cls.SINGLETON_CLIENT_CONNECTION.is_closed
or not cls.SINGLETON_CLIENT_CHANNEL or cls.SINGLETON_CLIENT_CHANNEL.is_closed):
cls.IN_CONNECT = True
dispatch = "do_connect"
else:
dispatch = "raise_exception"
if dispatch == "do_connect":
def _on_open_callback(*args, **kwargs):
"""connection open callback
Args:
                    *args (tuple): variable positional arguments
                    **kwargs (dict): variable keyword arguments
Returns:
"""
def _on_channel_open(*args, **kwargs):
"""channel open callback
Args:
                        *args (tuple): variable positional arguments
                        **kwargs (dict): variable keyword arguments
Returns:
"""
with cls.LOCK:
cls.IN_CONNECT = False
cls.SINGLETON_CLIENT_CHANNEL.basic_publish(exchange=config.EXCHANGE,
routing_key=config.ROUTING_KEY,
body="channel is opening",
properties=pika.BasicProperties(delivery_mode=2))
try:
cls.SINGLETON_CLIENT_CHANNEL = cls.SINGLETON_CLIENT_CONNECTION.channel(
on_open_callback=_on_channel_open)
except Exception as channel_open_error:
logger.error("channel open error: {}".format(str(channel_open_error)))
                    cls._process_execption()  # release connection resources
with cls.LOCK:
cls.IN_CONNECT = False
def _on_open_error_callback(*args, **kwargs):
"""connection open error callback
Args:
                    *args (tuple): variable positional arguments
                    **kwargs (dict): variable keyword arguments
Returns:
"""
cls._process_execption()
with cls.LOCK:
cls.IN_CONNECT = False
def _rabbit_ioloop_process(connection):
"""RabbitMQ ioloop
Args:
                    connection (object): pika.SelectConnection instance
Returns:
"""
try:
# ioloop: pika.adapters.base_connection
# start: pika.adapters.utils.selector_ioloop_adapter
connection.ioloop.start()
except Exception as rabbit_ioloop_error:
logger.error("RabbitMQ ioloop error: {}".format(str(rabbit_ioloop_error)))
cls._process_execption()
try:
cls.SINGLETON_CLIENT_CONNECTION = pika.SelectConnection(parameters=PARAMETERS,
on_open_callback=_on_open_callback,
on_open_error_callback=_on_open_error_callback)
threading.Thread(target=_rabbit_ioloop_process, args=(cls.SINGLETON_CLIENT_CONNECTION,)).start()
except Exception as async_connect_error:
logger.error("async connect failed: {}".format(str(async_connect_error)))
                # If starting the asynchronous connection fails, reset IN_CONNECT to False here; once the
                # connection has started, the callbacks update IN_CONNECT instead
with cls.LOCK:
cls.IN_CONNECT = False
else:
raise InConnectionException()
@classmethod
    def _process_execption(cls):
        """Release the RabbitMQ channel and connection after an error.
Returns:
"""
try:
if cls.SINGLETON_CLIENT_CHANNEL:
cls.SINGLETON_CLIENT_CHANNEL.close()
if cls.SINGLETON_CLIENT_CONNECTION:
cls.SINGLETON_CLIENT_CONNECTION.close()
except Exception as connect_rabbitmq_error:
logger.error("close rabbitmq connection failed: {}".format(str(connect_rabbitmq_error)))
finally:
cls.SINGLETON_CLIENT_CHANNEL = None
cls.SINGLETON_CLIENT_CONNECTION = None
    def rabbitmq_connection_setup(self):
        """Establish the RabbitMQ connection.
Returns:
"""
try:
# self._async_reconnect()
self._reconnect()
except Exception as rabbitmq_connection_setup_error:
logger.error("RabbitMQ connection setup failed: {}".format(str(rabbitmq_connection_setup_error)))
self._process_execption()
@classmethod
    def send_rabbitmq_message(cls, message, routing_key, durable):
        """Send a message to RabbitMQ.
        Args:
            message (str): message to send
            routing_key (str): routing key
            durable (bool): whether to publish the message as persistent (delivery_mode=2); defaults to False
        Returns:
            tuple: (True, "OK") on success, (False, description) on failure
"""
ret = (True, "OK")
try:
if not cls.IN_CONNECT and (not cls.SINGLETON_CLIENT_CONNECTION or
cls.SINGLETON_CLIENT_CONNECTION.is_closed or not
cls.SINGLETON_CLIENT_CHANNEL or cls.SINGLETON_CLIENT_CHANNEL.is_closed):
# cls._async_reconnect()
cls._reconnect()
if cls.SINGLETON_CLIENT_CHANNEL:
if durable:
send_properties = pika.BasicProperties(delivery_mode=2)
else:
send_properties = None
cls.SINGLETON_CLIENT_CHANNEL.basic_publish(exchange=config.EXCHANGE,
routing_key=routing_key,
body=json.dumps(message),
properties=send_properties)
else:
ret = (False, "RabbitMQ connection is not ready!")
except InConnectionException as in_connection_error:
logger.warning("RabbitMQ connection exception: {}".format(str(in_connection_error)))
except Exception as other_error:
logger.error("send msg({}) to RabbitMQ({}) port({}) vhost({}) exchange({}) routing_key({}) failed!".format(
message, config.RABBITMQ_HOST, config.RABBITMQ_PORT, config.RABBITMQ_VHOST, config.EXCHANGE,
config.ROUTING_KEY
))
logger.error("Unexpected error occur: {}".format(str(other_error)))
cls._process_execption()
ret = (False, "Exception error")
return ret
class InConnectionException(Exception):
    """Exception raised while a RabbitMQ connection attempt is already in progress.
"""
    def __str__(self):
        """String representation used when the exception is raised.
        Returns:
            str: exception message
"""
return "The main thread is connecting the rabbitmq host."
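# Illustrative usage sketch (added; not part of the original module): how a caller might
# publish an event through this helper. The payload keys are assumptions for demonstration
# only; running it requires a reachable RabbitMQ broker and the Django settings configured above.
if __name__ == "__main__":
    connection = RabbitMQConnection()
    connection.rabbitmq_connection_setup()
    ok, detail = RabbitMQConnection.send_rabbitmq_message(
        {"event": "wake", "mac": "00:11:22:33:44:55"},  # hypothetical payload
        config.ROUTING_KEY,
        durable=True,
    )
    logger.info("publish result: {} ({})".format(ok, detail))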
|
the-stack_106_29466 | # Definition for a singly-linked list node. LeetCode provides this class for you;
# the standard definition is repeated here so the snippet runs standalone.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
def partition(self, head: ListNode, x: int) -> ListNode:
dummy_1 = ListNode(0, head)
dummy_2 = ListNode(0, head)
p, h = dummy_1, head
n = dummy_2
while h:
if h.val<x:
n.next = h
n = n.next
p.next = h.next
else:
p = h
h = h.next
n.next = dummy_1.next
return dummy_2.next
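# Minimal local check (added sketch): partitioning [1,4,3,2,5,2] around x=3 keeps the
# relative order within each group and prints [1, 2, 2, 4, 3, 5].
if __name__ == "__main__":
    head = None
    for val in reversed([1, 4, 3, 2, 5, 2]):
        head = ListNode(val, head)
    node = Solution().partition(head, 3)
    result = []
    while node:
        result.append(node.val)
        node = node.next
    print(result)  # expected: [1, 2, 2, 4, 3, 5]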
|
the-stack_106_29468 | import xarray as xr
import numpy as np
from loaders.mappers._base import GeoMapper
from loaders.mappers._xarray import XarrayMapper
from loaders._config import mapper_functions
from loaders.mappers._fine_res import (
open_zarr,
standardize_coords,
MLTendencies,
)
from loaders.mappers._fine_res_budget import compute_fine_res_sources
def _open_fine_resolution_nudging_hybrid_dataset(
# created by this commit:
# https://github.com/VulcanClimateModeling/vcm-workflow-control/commit/3c852d0e4f8b86c4e88db9f29f0b8e484aeb77a1
# I manually consolidated the metadata with zarr.consolidate_metadata
fine_url: str = "gs://vcm-ml-experiments/default/2021-04-27/2020-05-27-40-day-X-SHiELD-simulation/fine-res-budget.zarr", # noqa: E501
# created by this commit
# https://github.com/VulcanClimateModeling/vcm-workflow-control/commit/dd4498bcf3143d05095bf9ff4ca3f1341ba25330
nudge_url="gs://vcm-ml-experiments/2021-04-13-n2f-c3072/3-hrly-ave-rad-precip-setting-30-min-rad-timestep-shifted-start-tke-edmf", # noqa: E501
include_temperature_nudging: bool = False,
) -> xr.Dataset:
fine = open_zarr(fine_url)
fine_shifted = standardize_coords(fine)
fine_shifted["Q1"], fine_shifted["Q2"] = compute_fine_res_sources(
fine_shifted, include_temperature_nudging
)
return _open_nudged_hybrid_portion(fine_shifted, nudge_url)
def _open_precomputed_fine_resolution_nudging_hybrid_dataset(
fine_url: str, nudge_url: str,
) -> xr.Dataset:
fine = open_zarr(fine_url)
return _open_nudged_hybrid_portion(fine, nudge_url)
def _open_nudged_hybrid_portion(
fine_shifted: xr.Dataset, nudge_url: str
) -> MLTendencies:
nudge_physics_tendencies = open_zarr(nudge_url + "/physics_tendencies.zarr",)
nudge_state = open_zarr(nudge_url + "/state_after_timestep.zarr")
nudge_tends = open_zarr(nudge_url + "/nudging_tendencies.zarr")
merged = xr.merge(
[fine_shifted, nudge_state, nudge_physics_tendencies], join="inner",
)
# dQ1,2,u,v
# "hybrid" definitions for humidity and moisture
merged["dQ1"] = (
merged["Q1"] - merged["tendency_of_air_temperature_due_to_fv3_physics"]
)
merged["dQ2"] = (
merged["Q2"] - merged["tendency_of_specific_humidity_due_to_fv3_physics"]
)
merged["dQxwind"] = nudge_tends.x_wind_tendency_due_to_nudging
merged["dQywind"] = nudge_tends.y_wind_tendency_due_to_nudging
# drop time from lat and lon
merged["latitude"] = merged.latitude.isel(time=0)
merged["longitude"] = merged.longitude.isel(time=0)
return merged.astype(np.float32)
@mapper_functions.register
def open_fine_resolution_nudging_hybrid(
fine_url: str = "", nudge_url: str = "", include_temperature_nudging: bool = False,
) -> GeoMapper:
"""
Open the fine resolution nudging_hybrid mapper
Args:
fine_url: url where coarsened fine resolution data is stored
nudge_url: url to nudging data to be used as a residual
include_temperature_nudging: whether to include fine-res nudging in Q1
Returns:
a mapper
"""
return XarrayMapper(
_open_fine_resolution_nudging_hybrid_dataset(
fine_url=fine_url,
nudge_url=nudge_url,
include_temperature_nudging=include_temperature_nudging,
)
)
@mapper_functions.register
def open_precomputed_fine_resolution_nudging_hybrid(
fine_url: str, nudge_url: str,
) -> GeoMapper:
"""
Open the fine resolution nudging hybrid mapper with precomputed fine-res data
Args:
fine_url: url where coarsened fine resolution data is stored, must include
precomputed Q1 and Q2
nudge_url: url to nudging data to be used as a residual
Returns:
a mapper
"""
return XarrayMapper(
_open_precomputed_fine_resolution_nudging_hybrid_dataset(
fine_url=fine_url, nudge_url=nudge_url
)
)
|
the-stack_106_29470 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
from typing import Dict, Optional
from airflow.configuration import conf
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
from airflow.utils.decorators import apply_defaults
from airflow.utils.operator_helpers import context_to_airflow_vars
class HiveOperator(BaseOperator):
"""
Executes hql code or hive script in a specific Hive database.
:param hql: the hql to be executed. Note that you may also use
a relative path from the dag file of a (template) hive
script. (templated)
:type hql: str
:param hive_cli_conn_id: reference to the Hive database. (templated)
:type hive_cli_conn_id: str
:param hiveconfs: if defined, these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``
:type hiveconfs: dict
:param hiveconf_jinja_translate: when True, hiveconf-type templating
${var} gets translated into jinja-type templating {{ var }} and
${hiveconf:var} gets translated into jinja-type templating {{ var }}.
Note that you may want to use this along with the
``DAG(user_defined_macros=myargs)`` parameter. View the DAG
object documentation for more details.
:type hiveconf_jinja_translate: bool
:param script_begin_tag: If defined, the operator will get rid of the
part of the script before the first occurrence of `script_begin_tag`
:type script_begin_tag: str
:param run_as_owner: Run HQL code as a DAG's owner.
:type run_as_owner: bool
:param mapred_queue: queue used by the Hadoop CapacityScheduler. (templated)
:type mapred_queue: str
:param mapred_queue_priority: priority within CapacityScheduler queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: str
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: str
"""
template_fields = ('hql', 'schema', 'hive_cli_conn_id', 'mapred_queue',
'hiveconfs', 'mapred_job_name', 'mapred_queue_priority')
template_ext = ('.hql', '.sql',)
ui_color = '#f0e4ec'
@apply_defaults
def __init__(
self,
hql: str,
hive_cli_conn_id: str = 'hive_cli_default',
schema: str = 'default',
hiveconfs: Optional[Dict] = None,
hiveconf_jinja_translate: bool = False,
script_begin_tag: Optional[str] = None,
run_as_owner: bool = False,
mapred_queue: Optional[str] = None,
mapred_queue_priority: Optional[str] = None,
mapred_job_name: Optional[str] = None,
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.hql = hql
self.hive_cli_conn_id = hive_cli_conn_id
self.schema = schema
self.hiveconfs = hiveconfs or {}
self.hiveconf_jinja_translate = hiveconf_jinja_translate
self.script_begin_tag = script_begin_tag
self.run_as = None
if run_as_owner:
self.run_as = self.dag.owner
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
self.mapred_job_name_template = conf.get('hive',
'mapred_job_name_template')
# assigned lazily - just for consistency we can create the attribute with a
# `None` initial value, later it will be populated by the execute method.
# This also makes `on_kill` implementation consistent since it assumes `self.hook`
# is defined.
self.hook = None
def get_hook(self):
return HiveCliHook(
hive_cli_conn_id=self.hive_cli_conn_id,
run_as=self.run_as,
mapred_queue=self.mapred_queue,
mapred_queue_priority=self.mapred_queue_priority,
mapred_job_name=self.mapred_job_name)
def prepare_template(self):
if self.hiveconf_jinja_translate:
self.hql = re.sub(
r"(\$\{(hiveconf:)?([ a-zA-Z0-9_]*)\})", r"{{ \g<3> }}", self.hql)
if self.script_begin_tag and self.script_begin_tag in self.hql:
self.hql = "\n".join(self.hql.split(self.script_begin_tag)[1:])
def execute(self, context):
self.log.info('Executing: %s', self.hql)
self.hook = self.get_hook()
# set the mapred_job_name if it's not set with dag, task, execution time info
if not self.mapred_job_name:
ti = context['ti']
self.hook.mapred_job_name = self.mapred_job_name_template\
.format(dag_id=ti.dag_id, task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
hostname=ti.hostname.split('.')[0])
if self.hiveconf_jinja_translate:
self.hiveconfs = context_to_airflow_vars(context)
else:
self.hiveconfs.update(context_to_airflow_vars(context))
self.log.info('Passing HiveConf: %s', self.hiveconfs)
self.hook.run_cli(hql=self.hql, schema=self.schema, hive_conf=self.hiveconfs)
def dry_run(self):
self.hook = self.get_hook()
self.hook.test_hql(hql=self.hql)
def on_kill(self):
if self.hook:
self.hook.kill()
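# Illustrative DAG snippet (added sketch; not part of the provider module). It shows the
# operator wired into a minimal DAG; the connection id, queue and query are placeholders.
if __name__ == "__main__":
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="hive_example", start_date=datetime(2020, 1, 1), schedule_interval=None) as dag:
        count_rows = HiveOperator(
            task_id="count_rows",
            hql="SELECT COUNT(*) FROM my_db.my_table",
            hive_cli_conn_id="hive_cli_default",
            mapred_queue="default",
        )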
|
the-stack_106_29472 | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class BatchUpdateDevicesStatusResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
"""BatchUpdateDevicesStatusResponse - a model defined in huaweicloud sdk"""
super(BatchUpdateDevicesStatusResponse, self).__init__()
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchUpdateDevicesStatusResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_29473 | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **callable code object** utilities.
This private submodule implements utility functions dynamically introspecting
**code objects** (i.e., instances of the :class:`CodeType` type)
underlying all pure-Python callables.
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype.roar._roarexc import _BeartypeUtilCallableException
from beartype._util.func.utilfuncwrap import unwrap_func
from beartype._data.datatyping import Codeobjable, TypeException
from types import CodeType, FrameType, FunctionType, GeneratorType, MethodType
from typing import Any, Optional
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ GETTERS }....................
def get_func_codeobj(
# Mandatory parameters.
func: Codeobjable,
# Optional parameters.
is_unwrapping: bool = False,
exception_cls: TypeException = _BeartypeUtilCallableException,
) -> CodeType:
'''
**Code object** (i.e., instance of the :class:`CodeType` type) underlying
the passed **codeobjable** (i.e., pure-Python object directly associated
with a code object) if this object is codeobjable *or* raise an exception
otherwise (e.g., if this object is *not* codeobjable).
For convenience, this getter also accepts a code object, in which case that
code object is simply returned as is.
Code objects have a docstring under CPython resembling:
.. code-block:: python
Code objects provide these attributes:
co_argcount number of arguments (not including *, ** args
or keyword only arguments)
co_code string of raw compiled bytecode
co_cellvars tuple of names of cell variables
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was
created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg |
8=**arg | 16=nested | 32=generator | 64=nofree |
128=coroutine | 256=iterable_coroutine |
512=async_generator
co_freevars tuple of names of free variables
co_posonlyargcount number of positional only arguments
co_kwonlyargcount number of keyword only arguments (not including
** arg)
co_lnotab encoded mapping of line numbers to bytecode
indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables
Parameters
----------
func : Codeobjable
Codeobjable to be inspected.
is_unwrapping: bool, optional
``True`` only if this getter implicitly calls the :func:`unwrap_func`
function to unwrap this possibly higher-level wrapper into a possibly
lower-level wrappee *before* returning the code object of that wrappee.
Note that doing so incurs worst-case time complexity ``O(n)`` for ``n``
the number of lower-level wrappees wrapped by this wrapper. Defaults to
``False`` for efficiency.
exception_cls : type, optional
Type of exception in the event of a fatal error. Defaults to
:class:`_BeartypeUtilCallableException`.
Returns
----------
CodeType
Code object underlying this callable.
Raises
----------
exception_cls
If this callable has *no* code object and is thus *not* pure-Python.
'''
# Code object underlying this callable if this callable is pure-Python *OR*
# "None" otherwise.
func_codeobj = get_func_codeobj_or_none(
func=func, is_unwrapping=is_unwrapping)
# If this callable is *NOT* pure-Python...
if func_codeobj is None:
# Avoid circular import dependencies.
from beartype._util.func.utilfunctest import die_unless_func_python
# Raise an exception.
die_unless_func_python(func=func, exception_cls=exception_cls)
# Else, this callable is pure-Python and this code object exists.
# Return this code object.
return func_codeobj # type: ignore[return-value]
def get_func_codeobj_or_none(
# Mandatory parameters.
func: Any,
# Optional parameters.
is_unwrapping: bool = False,
) -> Optional[CodeType]:
'''
**Code object** (i.e., instance of the :class:`CodeType` type) underlying
the passed **codeobjable** (i.e., pure-Python object directly associated
with a code object) if this object is codeobjable *or* ``None`` otherwise
(e.g., if this object is *not* codeobjable).
Specifically, if the passed object is a:
* Pure-Python function, this getter returns the code object of that
function.
* Pure-Python bound method wrapping a pure-Python unbound function, this
getter returns the code object of the latter.
* Pure-Python call stack frame, this getter returns the code object of the
pure-Python callable encapsulated by that frame.
* Code object, this getter returns that code object.
* Any other object, this getter raises an exception.
Caveats
-------
If ``is_unwrapping``, **this callable has worst-case time complexity**
``O(n)`` **for** ``n`` **the number of lower-level wrappees wrapped by this
higher-level wrapper.** That parameter should thus be disabled in
time-critical code paths; instead, the lowest-level wrappee returned by the
:func:``beartype._util.func.utilfuncwrap.unwrap_func` function should be
temporarily stored and then repeatedly passed.
Parameters
----------
func : Codeobjable
Codeobjable to be inspected.
is_unwrapping: bool, optional
``True`` only if this getter implicitly calls the :func:`unwrap_func`
function to unwrap this possibly higher-level wrapper into a possibly
lower-level wrappee *before* returning the code object of that wrappee.
Note that doing so incurs worst-case time complexity ``O(n)`` for ``n``
the number of lower-level wrappees wrapped by this wrapper. Defaults to
``False`` for efficiency.
Returns
----------
Optional[CodeType]
Either:
* If the passed callable is pure-Python, that callable's code object.
* Else, ``None``.
See Also
----------
:func:`get_func_codeobj`
Further details.
'''
assert is_unwrapping.__class__ is bool, f'{is_unwrapping} not boolean.'
# Note that:
# * For efficiency, tests are intentionally ordered in decreasing
# likelihood of a successful match.
# * An equivalent algorithm could also technically be written as a chain of
# "getattr(func, '__code__', None)" calls, but that doing so would both
# be less efficient *AND* render this getter less robust. Why? Because
# the getattr() builtin internally calls the __getattr__() and
# __getattribute__() dunder methods (either of which could raise
# arbitrary exceptions) and is thus considerably less safe.
#
# If this object is already a code object, return this object as is.
if isinstance(func, CodeType):
return func
# Else, this object is *NOT* already a code object.
#
# If this object is a pure-Python function...
#
# Note that this test intentionally leverages the standard
# "types.FunctionType" class rather than our equivalent
# "beartype.cave.FunctionType" class to avoid circular import issues.
elif isinstance(func, FunctionType):
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# CAUTION: Synchronize this with the same test below (for methods).
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Return the code object of either:
# * If unwrapping this function, the lowest-level wrappee wrapped by
# this function.
# * Else, this function as is.
return (unwrap_func(func) if is_unwrapping else func).__code__ # type: ignore[attr-defined]
# Else, this object is *NOT* a pure-Python function.
#
# If this callable is a bound method, return this method's code object.
#
# Note this test intentionally tests the standard "types.MethodType" class
# rather than our equivalent "beartype.cave.MethodBoundInstanceOrClassType"
# class to avoid circular import issues.
elif isinstance(func, MethodType):
# Unbound function underlying this bound method.
func = func.__func__
#FIXME: Can "MethodType" objects actually bind lower-level C-based
#rather than pure-Python functions? We kinda doubt it -- but maybe they
#can. If they can't, then this test is superfluous and should be
#removed with all haste.
# If this unbound function is pure-Python...
if isinstance(func, FunctionType):
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# CAUTION: Synchronize this with the same test above.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Return the code object of either:
# * If unwrapping this function, the lowest-level wrappee wrapped
# by this function.
# * Else, this function as is.
return (unwrap_func(func) if is_unwrapping else func).__code__ # type: ignore[attr-defined]
# Else, this callable is *NOT* a pure-Python bound method.
#
# If this object is a pure-Python generator, return this generator's code
# object.
elif isinstance(func, GeneratorType):
return func.gi_code
# Else, this object is *NOT* a pure-Python generator.
#
# If this object is a call stack frame, return this frame's code object.
elif isinstance(func, FrameType):
#FIXME: *SUS AF.* This is likely to behave as expected *ONLY* for
#frames encapsulating pure-Python callables. For frames encapsulating
#C-based callables, this is likely to fail with an "AttributeError"
#exception. That said, we have *NO* idea how to test this short of
#defining our own C-based callable accepting a pure-Python callable as
#a callback parameter and calling that callback. Are there even C-based
#callables like that in the wild?
return func.f_code
# Else, this object is *NOT* a call stack frame. Since none of the above
# tests matched, this object *MUST* be a C-based callable.
# Fallback to returning "None".
return None
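# Editor's note: the block below is a hypothetical usage sketch, *not* part of the
# original module. It only exercises the dispatch implemented above and assumes the
# module's existing imports resolve as-is.
if __name__ == '__main__':
    def _plain(x):
        return x
    class _Widget(object):
        def method(self):
            pass
    # A pure-Python function yields its own code object.
    assert get_func_codeobj_or_none(_plain) is _plain.__code__
    # A bound method yields the code object of the underlying unbound function.
    assert get_func_codeobj_or_none(_Widget().method) is _Widget.method.__code__
    # A C-based callable (e.g., the len() builtin) yields "None".
    assert get_func_codeobj_or_none(len) is None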
|
the-stack_106_29474 | # -*- coding:utf-8 -*-
#
# Name  : FPGA-based DES design; Python host application, slave IC: CY7C68013A
# Origin:190415
# Author:helrori
#
import usb.core
import usb.util
import sys
import time
from pyDes import des, PAD_NORMAL,ECB
import binascii
import numpy as np
pkg_1 = 1  # The maximum value is determined by the FIFO size inside the FPGA (fifo_size/512); see the notes in usb.v
def des_encrypt(k,s):
"""
    DES encryption (software reference implementation using pyDes)
    :param k: 8-byte DES key
    :param s: plaintext bytes
    :return: ciphertext bytes
"""
secret_key = k
iv = secret_key
k = des(secret_key, ECB, iv, pad=None, padmode=PAD_NORMAL )
en = k.encrypt(s, padmode=PAD_NORMAL )
return (en)
def des_descrypt(k,s):
"""
    DES decryption (software reference implementation using pyDes)
    :param k: 8-byte DES key
    :param s: ciphertext bytes
    :return: plaintext bytes
"""
secret_key = k
iv = k
k = des(secret_key, ECB, iv, pad=None, padmode=PAD_NORMAL )
de = k.decrypt(s, padmode=PAD_NORMAL )
return (de)
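# Editor's note: hypothetical self-test for the two software helpers above; not part
# of the original script and never called by it. It assumes only the pyDes package
# already imported at the top of this file.
def _selftest_software_des():
    key = bytearray(b'\x13\x34\x57\x79\x9B\xBC\xDF\xF1')
    plaintext = b'\x01\x23\x45\x67\x89\xAB\xCD\xEF'
    ciphertext = des_encrypt(key, plaintext)
    # ECB mode is deterministic, so decrypting must restore the original block.
    assert des_descrypt(key, ciphertext) == plaintext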
def fpga_des(dev=None,encrypt=True,debug_mesg=True,key=bytearray(b'\x13\x34\x57\x79\x9B\xBC\xDF\xF1'),text=''):
"""
    DES encryption or decryption performed by the FPGA over USB
    :param dev: pyusb device handle
    :param encrypt: True to encrypt, False to decrypt (signalled to the FPGA via the key's lowest bit below)
    :param debug_mesg: print the REGW handshake when True
    :param key: 8-byte DES key
    :param text: input bytes; length must be a multiple of 8 and less than 65535*fifo_size bytes
    :return: recvdata, a numpy uint8 array holding the processed bytes
"""
global pkg_1
    fifo_size=pkg_1*512         # FPGA FIFO size in bytes
    n=1+len(text)//fifo_size    # number of packets to send; text < 65535*fifo_size
    uu=len(text)%fifo_size      # remainder
pkg_0 = n.to_bytes(2, byteorder = 'big')
pkg_1_ = pkg_1.to_bytes(2, byteorder = 'big')
if(encrypt == True):
if(key[7]&1!=1):
key[7]=key[7]+1
else:
if(key[7]&1==1):
key[7]=key[7]-1
#REGW
    dev.write(0x02,b'REGW0000',100)  # 'REGW0000' is shorter than 512 bytes, but a full 512-byte packet is still sent
data = dev.read(0x86,512)
if(debug_mesg==True):
print ('send: REGW0000 recv: '+''.join(c for c in map(chr,data[0:8])))
dev.write(0x02,key,100)
#REGW_END
recvdata =np.zeros(len(text),dtype='uint8')
microdata=np.zeros(fifo_size,dtype='uint8')
    #text=np.append(text,[0]*(fifo_size-uu))  # pad text up to an integer multiple of fifo_size
for i in range(fifo_size-uu):
text+=b'\x00'
#LOPD
dev.write(0x02,b'LOPD'+pkg_1_+pkg_0,100)
buff0=dev.read(0x86,512)
for j in range(n):
if(j==(n-1)):
ed=j*fifo_size+uu
else:
ed=(j+1)*fifo_size
if(j*fifo_size != ed):
dev.write(0x02,text[j*fifo_size:(j+1)*fifo_size],100)
microdata = np.array(dev.read(0x86,fifo_size))
recvdata[j*fifo_size:ed] = microdata[0:ed-j*fifo_size].copy()
#LOPD_END
return recvdata
def fpga_encrypt(dev,key,ifilename,ofilename):
global pkg_1
    fifo_size=pkg_1*512  # FPGA FIFO size in bytes
    # zero-pad the plaintext before encryption
    file_i=open(ifilename, 'rb+')
    databytes = file_i.read()
u=len(databytes)%8
padding=8-u
for i in range(padding):
databytes+=b'\x00' # zeros padding ANSI X.923
if(len(databytes) >= 65535*fifo_size):#
print('file too big:',len(databytes))
recvdata =np.zeros(len(databytes),dtype='uint8')
n=1+len(databytes)//(65535*fifo_size-8)
u=len(databytes)%(65535*fifo_size-8)
for i in range(n):
if(i==(n-1)):
ed=i*(65535*fifo_size-8)+u
else:
ed=(i+1)*(65535*fifo_size-8)
if(ed != i*(65535*fifo_size-8)):
buff=fpga_des(dev,True,True,key,databytes[i*(65535*fifo_size-8):ed])
recvdata[i*(65535*fifo_size-8):ed]=buff[0:ed-i*(65535*fifo_size-8)].copy()
else:
recvdata=fpga_des(dev,True,False,key,databytes)
    # save the encrypted file
    file_o= open(ofilename, 'wb')  # binary mode, since raw bytes are written
    recvdata=np.append(recvdata,np.array(padding,dtype='uint8'))  # the padding count is stored as the final byte of the .des file
recvdata.tofile(file_o)
file_o.close()
    file_i.close()
return len(databytes)
def fpga_descrypt(dev,key,ifilename,ofilename):
global pkg_1
    fifo_size=pkg_1*512  # FPGA FIFO size in bytes
    # decryption
    file_i=open(ifilename, 'rb+')
    databytes = file_i.read()
if((len(databytes)-1) >= 65535*fifo_size):#
print('file too big:',len(databytes)-1)
recvdata =np.zeros(len(databytes)-1,dtype='uint8')
n=1+(len(databytes)-1)//(65535*fifo_size-8)
u=(len(databytes)-1)%(65535*fifo_size-8)
for i in range(n):
if(i==(n-1)):
ed=i*(65535*fifo_size-8)+u
else:
ed=(i+1)*(65535*fifo_size-8)
if(ed != i*(65535*fifo_size-8)):
buff=fpga_des(dev,False,True,key,databytes[i*(65535*fifo_size-8):ed])
recvdata[i*(65535*fifo_size-8):ed]=buff[0:ed-i*(65535*fifo_size-8)].copy()
else:
        recvdata=fpga_des(dev,False,False,key,databytes[0:-1])  # drop the trailing padding-count byte
    padding=databytes[-1]             # read the padding count stored in the .des file
    # save the decrypted file
    file_o= open(ofilename, 'wb')     # binary mode, since raw bytes are written
    recvdata[0:-(padding)].tofile(file_o)  # strip the padding before saving
file_o.close()
    file_i.close()
return len(databytes)-1
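# Editor's note: illustrative sketch of the .des padding convention used by
# fpga_encrypt()/fpga_descrypt() above (zero padding plus a trailing pad-count
# byte); not part of the original script and never called by it.
def _pad_for_des(data):
    # fpga_encrypt() always appends 8 - (len(data) % 8) zero bytes (a full
    # 8-byte pad when the length is already a multiple of 8) and later stores
    # that count as the very last byte of the .des output file.
    padding = 8 - (len(data) % 8)
    return data + b'\x00' * padding, padding
def _strip_des_padding(plain, padding):
    # fpga_descrypt() reads the count back from the file's last byte and drops
    # that many trailing bytes from the decrypted data.
    return plain[:-padding]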
def main():
dev = usb.core.find(idVendor=0x04b4, idProduct=0x1003)
#print(dev)
if dev is None:
raise ValueError('Device not found')
dev.set_configuration()
print('Device found VID 0x04b4,PID 0x1003')
key = bytearray(b'\x13\x34\x57\x79\x9B\xBC\xDF\xF1')
text0 = b'\x01\x23\x45\x67\x89\xAB\xCD\xEF'
text1 = b'\x85\xE8\x13\x54\x0F\x0A\xB4\x05'
    print('------------------------ python pyDes encryption/decryption results --------------------------')
print("key : "+ ' '.join(c for c in map(hex,key)))
print("text0 : "+ ' '.join(c for c in map(hex,text0)))
print("text1 : "+ ' '.join(c for c in map(hex,text1)))
a=des_encrypt(key,text0)
b=des_encrypt(key,text1)
print('en(text0): ',' '.join(c for c in map(hex,a)),'\nen(text1): ',' '.join(c for c in map(hex,b)))
a=des_descrypt(key,text0)
b=des_descrypt(key,text1)
print('de(text0): ',' '.join(c for c in map(hex,a)),'\nde(text1): ',' '.join(c for c in map(hex,b)))
    print('------------------------ FPGA USB DES encryption/decryption results --------------------------')
print("key : "+ ' '.join(c for c in map(hex,key)))
print("text0 : "+ ' '.join(c for c in map(hex,text0)))
print("text1 : "+ ' '.join(c for c in map(hex,text1)))
a=fpga_des(dev,True,False,key,text0)
b=fpga_des(dev,True,False,key,text1)
print('en(text0): ',' '.join(c for c in map(hex,a)),'\nen(text1): ',' '.join(c for c in map(hex,b)))
a=fpga_des(dev,False,False,key,text0)
b=fpga_des(dev,False,False,key,text1)
print('de(text0): ',' '.join(c for c in map(hex,a)),'\nde(text1): ',' '.join(c for c in map(hex,b)))
print('--------------------------------------------------------------------------')
    print('-------------------------- FPGA USB DES file encryption/decryption test -------------------')
    encrypt=input('Choose encrypt or decrypt  0: encrypt; 1: decrypt ')
    k=input('Enter an 8-byte key (press Enter to use the default key):\n')
    ifilename=input('Input file name:\n')
if(k==''):
k=key
print('key: '+' '.join(c for c in map(hex,k)))
else:
if(len(k)==8):
k=k.encode()
k=bytearray(k)
print('key: ',k)
else:
            print('key is not valid')
exit(0)
if(ifilename==''):
print('file name empty')
exit(0)
else:
print('file name: ',ifilename)
if(encrypt=='0'):
ofilename=ifilename+'.des'
print('output file name: ',ofilename)
start = time.time()
file_size=fpga_encrypt(dev,k,ifilename,ofilename)
end = time.time()
elif(encrypt=='1'):
ofilename=ifilename[0:-4]
if(ifilename[-4:] != '.des'):
print('not des file')
exit(0)
print('output file name: ',ofilename)
start = time.time()
file_size=fpga_descrypt(dev,k,ifilename,ofilename)
end = time.time()
else:
print('???')
exit(0)
print("time consuming :%.3f seconds" % (end - start))
    #print("actual throughput :%.3f MB per second" % (file_size/(end - start)/1024/1024))
print('--------------------------------------------------------------------------')
if __name__ == "__main__":
main()
|
the-stack_106_29476 |
import struct
from bprocessor import RecordProcessor, UnexpectedRecordException, RecordDescriptor
from btypes import BinaryRecordType
class ACUid:
@staticmethod
def read(stream):
rprocessor = RecordProcessor.resolve(stream)
ac = AlternatContent.read(rprocessor)
r = rprocessor.read_descriptor()
if r.rtype != BinaryRecordType.BrtUid:
raise UnexpectedRecordException(r, BinaryRecordType.BrtUid)
data = stream.read(r.size)
r = rprocessor.read_descriptor()
if r.rtype != BinaryRecordType.BrtACEnd:
raise UnexpectedRecordException(r, BinaryRecordType.BrtACEnd)
return ACUid(ac, data)
def __init__(self, ac, data):
self.ac = ac
self.data = data
def write(self, stream):
rprocessor = RecordProcessor.resolve(stream)
ac = self.ac
RecordDescriptor(BinaryRecordType.BrtACBegin, len(ac)).write(rprocessor)
ac.write(rprocessor)
RecordDescriptor(BinaryRecordType.BrtUid, len(self)).write(rprocessor)
rprocessor.write(self.data)
RecordDescriptor(BinaryRecordType.BrtACEnd).write(rprocessor)
def __len__(self):
return len(self.data)
class AlternatContent:
@staticmethod
def read(stream):
c_ver = struct.unpack('<H', stream.read(2))[0]
product_versions = []
for i in range(c_ver):
product_versions.append(ACProductVersion.read(stream))
return AlternatContent(product_versions)
def __init__(self, product_versions):
self.product_versions = product_versions
def write(self, stream):
product_versions = self.product_versions
stream.write(struct.pack('<H', len(product_versions)))
for product_version in product_versions:
product_version.write(stream)
def __len__(self):
return 2 + sum(len(v) for v in self.product_versions)
class ACProductVersion:
@staticmethod
def read(stream):
file_version, flags = struct.unpack('<HH', stream.read(4))
file_product = flags & 0x7fff
file_extension = flags & 0x8000
return ACProductVersion(file_version, file_product, bool(file_extension))
def __init__(self, version, product, forward_compatiblity):
self.version = version
self.product = product
self.forward_compatiblity = forward_compatiblity
def write(self, stream):
flags = self.product
if self.forward_compatiblity:
flags |= 0x8000
stream.write(struct.pack('<HH', self.version, flags))
def __len__(self):
        return 4
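# Editor's note: hypothetical round-trip check of the 16-bit flags layout handled by
# ACProductVersion above (low 15 bits: product; bit 15: forward compatibility); not
# part of the original module.
if __name__ == '__main__':
    import io
    original = ACProductVersion(5, 0x1234, True)
    buffer = io.BytesIO()
    original.write(buffer)
    buffer.seek(0)
    restored = ACProductVersion.read(buffer)
    assert (restored.version, restored.product) == (5, 0x1234)
    assert restored.forward_compatiblity is True  # attribute name as spelled in the class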
the-stack_106_29477 | #########################
# Star Wars Dice Roller #
#########################
import gc
import random
from ..menus import base_menus as m
# Define the dice
class dice:
def __init__(self, colour, name, sides, results, pool):
self.colour = colour
self.name = name
self.sides = sides
self.results = results
self.pool = pool
global dice_name_list
global dice_colour_list
global dice_sides_list
global dice_results_list
global dice_pool_list
dice_name_list = []
dice_colour_list = []
dice_sides_list = []
dice_results_list = []
dice_pool_list = []
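# Editor's note (added legend, inferred from the face strings below and from how
# rolldice() counts them): s = success, f = failure, a = advantage, t = threat,
# T = triumph, D = despair, l = light side, d = dark side, ' ' = blank face.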
# Green (Ability) - d8 [s,a,sa,ss,a,s,aa,' ']
ability = dice("green", "ability", 8, ["s", "a", "sa", "ss", "a", "s", "aa", " "], 0)
# Yellow (Proficiency) - d12 [aa,a,aa,Ts,s,sa,s,sa,ss,sa,ss,' ']
proficiency = dice(
"yellow",
"proficiency",
12,
["aa", "a", "aa", "Ts", "s", "sa", "s", "sa", "ss", "sa", "ss", " "],
1,
)
# Purple (Difficulty) - d8 [t,f,ft,t,' ',tt,ff,t]
difficulty = dice(
"purple", "difficulty", 8, ["t", "f", "ft", "t", " ", "tt", "ff", "t"], 2
)
# Red (Challenge) - d12 [tt,t,tt,t,ft,f,ft,f,ff,Df,ff,' ']
challenge = dice(
"red",
"challenge",
12,
["tt", "t", "tt", "t", "ft", "f", "ft", "f", "ff", "Df", "ff", " "],
3,
)
# Blue (Boost) - d6 [sa,aa,s,a,' ',' ']
boost = dice("blue", "boost", 6, ["sa", "aa", "s", "a", " ", " "], 4)
# Black (Setback) - d6 [' ',' ',t,t,f,f]
setback = dice("black", "setback", 6, [" ", " ", "t", "t", "f", "f"], 5)
# White (Force) - d12 [d,d,d,d,d,d,ll,ll,ll,l,l,dd]
force = dice(
"white",
"force",
12,
["d", "d", "d", "d", "d", "d", "ll", "ll", "ll", "l", "l", "dd"],
6,
)
dice_name_set = set(dice_name_list)
dice_name_list = list(dice_name_set)
dice_colour_set = set(dice_colour_list)
dice_colour_list = list(dice_colour_set)
dice_sides_set = set(dice_sides_list)
dice_sides_list = list(dice_sides_set)
dice_results_set = set(dice_results_list)
dice_results_list = list(dice_results_set)
dice_pool_set = set(dice_pool_list)
dice_pool_list = list(dice_pool_set)
def printchoices():
for obj in gc.get_objects():
if isinstance(obj, dice):
print(obj.name + " (" + obj.colour + "): " + str(swpool[obj.pool]))
dice_name_list.append(obj.name)
dice_colour_list.append(obj.colour)
dice_sides_list.append(obj.sides)
dice_results_list.append(obj.results)
dice_pool_list.append(obj.pool)
# Create swPool: Ask about which dice are being rolled by colour
def createswpool():
global swpool
swpool = []
pc = 0
for i in range(0, 7):
try:
die = abs(
int(
input(
"How many "
+ str(dice_name_list[pc])
+ " ("
+ str(dice_colour_list[pc])
+ ") dice? "
)
)
)
swpool.append(die)
except (NameError, TypeError, ValueError):
swpool.append(0)
pc = pc + 1
# Roll dice
def rolldice():
global rolled_results
rolled_results = []
rpc = 0
for i in range(0, 7):
if swpool[rpc] > 0:
for p in range(0, swpool[rpc]):
roll = random.randint(0, int(dice_sides_list[rpc]) - 1)
die_face = dice_results_list[rpc][roll]
rolled_results.append(die_face)
else:
rolled_results.append(" ")
rpc = rpc + 1
    # Consolidate the success/failure, advantage/threat,
    # triumph/despair and light/dark results
final_result = str(rolled_results)
successes = final_result.count("s")
failures = final_result.count("f")
advantages = final_result.count("a")
threats = final_result.count("t")
triumphs = final_result.count("T")
despairs = final_result.count("D")
light = final_result.count("l")
dark = final_result.count("d")
success_vs_failure = int(successes - failures)
advantages_vs_threats = int(advantages - threats)
print("\n")
if success_vs_failure >= 0:
print("Success " + str(success_vs_failure))
else:
print("Failure " + str(abs(success_vs_failure)))
if advantages_vs_threats >= 0:
print("Advantage " + str(advantages_vs_threats))
else:
print("Threat " + str(abs(advantages_vs_threats)))
print("Triumphs " + str(triumphs))
print("Despair " + str(despairs))
print("Lightside " + str(light))
print("Darkside " + str(dark))
print("\n")
# Offer reroll same, new swpool, or quit to menu
def swdicemenu():
print("Choose an option from the menu:")
print("1: Reroll the same dice pool")
print("2: Roll a new pool of dice")
print("q: Quit to main menu")
swubmenu_choice = input("Please enter your choice: ")
if swubmenu_choice == "1":
print("\n")
printchoices()
print("\n")
rolldice()
print("\n")
swdicemenu()
print("\n")
elif swubmenu_choice == "2":
createswpool()
print("\n")
printchoices()
print("\n")
rolldice()
swdicemenu()
print("\n")
elif swubmenu_choice == "q":
m.main_menu()
else:
print("Sorry - please try something else.")
print("\n")
swdicemenu()
def starwarsdice():
try:
global swpool
swpool = [0, 0, 0, 0, 0, 0, 0]
print("What dice do you want to roll?")
printchoices()
print("\n")
print("Enter how many of each die you want:")
createswpool()
print("\n")
printchoices()
print("\n")
rolldice()
swdicemenu()
except (NameError, TypeError, ValueError):
print("Sorry, try something else")
swdicemenu()
|
the-stack_106_29478 | import pytest
from graphapi.schema import schema
from openstates.data.models import Organization, Person
from .utils import populate_db
@pytest.mark.django_db
def setup():
populate_db()
@pytest.mark.django_db
def test_jurisdictions(django_assert_num_queries):
with django_assert_num_queries(2):
result = schema.execute(
""" {
jurisdictions {
edges {
node {
name
}
}
}
}
"""
)
assert result.errors is None
assert result.data["jurisdictions"]["edges"][0]["node"]["name"] == "Alaska"
assert result.data["jurisdictions"]["edges"][1]["node"]["name"] == "Wyoming"
@pytest.mark.django_db
def test_jurisdictions_num_queries(django_assert_num_queries):
with django_assert_num_queries(4):
result = schema.execute(
""" {
jurisdictions {
edges {
node {
name
legislativeSessions {
edges { node { identifier } }
}
organizations(first: 50) {
edges { node { name } }
}
}
}
}
}
"""
)
assert result.errors is None
assert (
len(
result.data["jurisdictions"]["edges"][0]["node"]["legislativeSessions"][
"edges"
]
)
== 2
)
assert (
len(result.data["jurisdictions"]["edges"][0]["node"]["organizations"]["edges"])
== 3
)
@pytest.mark.django_db
def test_jurisdictions_num_queries_subquery(django_assert_num_queries):
# same as test_jurisdictions_num_queries but with slightly more complex filtering on nodes
with django_assert_num_queries(4):
result = schema.execute(
""" {
jurisdictions {
edges {
node {
name
legislativeSessions(first: 1) {
edges { node { identifier } }
}
organizations(classification: "legislature", first: 50) {
edges { node { name } }
}
}
}
}
}
"""
)
assert result.errors is None
assert (
len(
result.data["jurisdictions"]["edges"][0]["node"]["legislativeSessions"][
"edges"
]
)
== 1
)
assert (
len(result.data["jurisdictions"]["edges"][0]["node"]["organizations"]["edges"])
== 1
)
@pytest.mark.django_db
def test_jurisdiction_by_id(django_assert_num_queries):
with django_assert_num_queries(5):
result = schema.execute(
""" {
jurisdiction(id:"ocd-jurisdiction/country:us/state:wy/government") {
name
legislativeSessions(first: 1) {
edges { node { identifier } }
}
organizations(classification: "legislature", first: 50) {
edges { node { name } }
}
}
}
"""
)
assert result.errors is None
assert len(result.data["jurisdiction"]["legislativeSessions"]["edges"]) == 1
assert len(result.data["jurisdiction"]["organizations"]["edges"]) == 1
@pytest.mark.django_db
def test_jurisdiction_by_name(django_assert_num_queries):
with django_assert_num_queries(5):
result = schema.execute(
""" {
jurisdiction(name:"Wyoming") {
name
legislativeSessions(first: 1) {
edges { node { identifier } }
}
organizations(classification: "legislature", first: 50) {
edges { node { name } }
}
}
}
"""
)
assert result.errors is None
assert len(result.data["jurisdiction"]["legislativeSessions"]["edges"]) == 1
assert len(result.data["jurisdiction"]["organizations"]["edges"]) == 1
@pytest.mark.django_db
def test_jurisdiction_chambers_current_members(django_assert_num_queries):
with django_assert_num_queries(5):
result = schema.execute(
""" {
jurisdiction(name:"Wyoming") {
chambers: organizations(classification:["upper", "lower"], first:2)
{ edges { node {
name
currentMemberships {
person { name }
}
} }
}
}
}
"""
)
assert result.errors is None
assert len(result.data["jurisdiction"]["chambers"]["edges"]) == 2
assert set(("Wyoming House", "Wyoming Senate")) == set(
edge["node"]["name"]
for edge in result.data["jurisdiction"]["chambers"]["edges"]
)
people = []
for chamber in result.data["jurisdiction"]["chambers"]["edges"]:
for m in chamber["node"]["currentMemberships"]:
people.append(m["person"]["name"])
assert len(people) == 2
@pytest.mark.django_db
def test_people_by_member_of(django_assert_num_queries):
ak_house = Organization.objects.get(
jurisdiction__name="Alaska", classification="lower"
)
with django_assert_num_queries(2):
result = schema.execute(
""" {
people(memberOf: "%s", first: 50) {
edges {
node {
name
}
}
}
}
"""
% ak_house.id
)
assert result.errors is None
assert len(result.data["people"]["edges"]) == 4
@pytest.mark.django_db
def test_variable_people_by_member_of(django_assert_num_queries):
ak_house = Organization.objects.get(
jurisdiction__name="Alaska", classification="lower"
)
with django_assert_num_queries(2):
result = schema.execute(
"""
query peeps($f: Int){
people(memberOf: "%s", first: $f) {
edges {
node {
name
}
}
}
}
"""
% ak_house.id,
variables={"f": 3},
)
assert result.errors is None
assert len(result.data["people"]["edges"]) == 3
@pytest.mark.django_db
def test_people_by_ever_member_of(django_assert_num_queries):
ak_house = Organization.objects.get(
jurisdiction__name="Alaska", classification="lower"
)
with django_assert_num_queries(2):
result = schema.execute(
""" {
people(everMemberOf: "%s", first:50) {
edges {
node {
name
}
}
}
}
"""
% ak_house.id
)
assert result.errors is None
# one extra person (Ellen Evil) is added as a former member of the House
assert len(result.data["people"]["edges"]) == 5
@pytest.mark.django_db
def test_people_by_district():
ak_house = Organization.objects.get(
jurisdiction__name="Alaska", classification="lower"
)
result = schema.execute(
""" {
ones: people(memberOf: "%s", district: "1", first: 50) {
edges { node { name } }
}
fives: people(everMemberOf: "%s", district: "5", first: 50) {
edges { node { name } }
}
bad: people(district: "1", first: 50) {
edges { node { name } }
}
}
"""
% (ak_house.id, ak_house.id)
)
assert "'district' parameter requires" in result.errors[0].message
assert len(result.data["ones"]["edges"]) == 1
assert len(result.data["fives"]["edges"]) == 1
assert result.data["bad"] is None
@pytest.mark.django_db
def test_people_by_division_id():
# Note: uses a fake divisionId that has two reps (one retired), only one should be returned
result = schema.execute(
""" {
people(divisionId: "ocd-division/country:us/state:ak/sldu:b", first: 50) {
edges { node { name } }
}
}
"""
)
assert len(result.data["people"]["edges"]) == 1
@pytest.mark.django_db
def test_people_by_name():
result = schema.execute(
""" {
people(name: "Hank", first: 50) {
edges { node { name } }
}
}
"""
)
assert result.errors is None
assert len(result.data["people"]["edges"]) == 1
@pytest.mark.django_db
def test_people_by_party():
result = schema.execute(
""" {
dems: people(memberOf: "Democratic", first: 50) {
edges { node { name } }
}
reps: people(memberOf: "Republican", first: 50) {
edges { node { name } }
}
}
"""
)
assert result.errors is None
assert len(result.data["dems"]["edges"]) == 3
assert len(result.data["reps"]["edges"]) == 4
# @pytest.mark.django_db
# def test_people_by_location():
# # TODO: need data to test with
# pass
@pytest.mark.django_db
def test_people_num_queries(django_assert_num_queries):
with django_assert_num_queries(8):
result = schema.execute(
""" {
people(first: 50) {
edges {
node {
name
image
identifiers { identifier }
otherNames { name }
links { url }
sources { url }
contactDetails { value label }
currentMemberships {
post {
label
division {
id
}
}
organization { name }
}
}
}
}
}"""
)
assert result.errors is None
assert len(result.data["people"]["edges"]) == 9
total_memberships = 0
for person in result.data["people"]["edges"]:
total_memberships += len(person["node"]["currentMemberships"])
assert total_memberships == 16 # 8 chambers + 8 parties
@pytest.mark.django_db
def test_people_total_count(django_assert_num_queries):
with django_assert_num_queries(2):
result = schema.execute(
""" {
people(first: 50) {
totalCount
edges {
node {
name
}
}
}
}"""
)
assert result.errors is None
assert result.data["people"]["totalCount"] == 9
assert len(result.data["people"]["edges"]) == 9
with django_assert_num_queries(2):
result = schema.execute(
""" {
people(first: 50, name: "Amanda") {
totalCount
edges {
node {
name
}
}
}
}"""
)
assert result.errors is None
assert result.data["people"]["totalCount"] == 1
assert len(result.data["people"]["edges"]) == 1
@pytest.mark.django_db
def test_people_current_memberships_classification(django_assert_num_queries):
with django_assert_num_queries(3):
result = schema.execute(
""" {
people(first: 50) {
edges {
node {
currentMemberships(classification: "party") {
organization { name }
}
}
}
}
}"""
)
assert result.errors is None
total_memberships = 0
for person in result.data["people"]["edges"]:
total_memberships += len(person["node"]["currentMemberships"])
assert total_memberships == 8 # Only the 8 parties should be returned
@pytest.mark.django_db
def test_people_old_memberships(django_assert_num_queries):
with django_assert_num_queries(3):
result = schema.execute(
"""{
people(first: 50) {
edges {
node {
oldMemberships {
organization { name }
}
}
}
}
}"""
)
assert result.errors is None
old_memberships = 0
for person in result.data["people"]["edges"]:
old_memberships += len(person["node"]["oldMemberships"])
assert old_memberships == 3 # three old memberships in test data right now
@pytest.mark.django_db
def test_person_by_id(django_assert_num_queries):
person = Person.objects.get(name="Bob Birch")
with django_assert_num_queries(7):
result = schema.execute(
""" {
person(id:"%s") {
name
image
primaryParty
identifiers { identifier }
otherNames { name }
links { url }
sources { url }
contactDetails { value label }
currentMemberships {
post {
label
division {
id
}
}
organization { name }
}
}
}"""
% person.id
)
assert result.errors is None
assert result.data["person"]["name"] == "Bob Birch"
assert result.data["person"]["primaryParty"] == "Republican"
assert len(result.data["person"]["currentMemberships"]) == 2
division = None
for membership in result.data["person"]["currentMemberships"]:
if membership["post"]:
division = membership["post"]["division"]
break
assert division["id"] == "ocd-division/country:us/state:ak/sldl:2"
@pytest.mark.django_db
def test_organization_by_id(django_assert_num_queries):
# get targets
leg = Organization.objects.get(
jurisdiction__name="Wyoming", classification="legislature"
)
sen = Organization.objects.get(jurisdiction__name="Wyoming", classification="upper")
# 1 query for legislature, 1 query each for children, links, sources
# 1 query for senate w/ parent
with django_assert_num_queries(6):
result = schema.execute(
""" {
leg: organization(id: "%s") {
name
classification
children(classification: "upper", first: 50) {
edges { node { classification } }
}
links { url }
sources { url }
}
senate: organization(id: "%s") {
name
parent {
name
}
}
}
"""
% (leg.id, sen.id)
)
assert result.errors is None
assert len(result.data["leg"]["children"]["edges"]) == 1
assert result.data["senate"]["parent"]["name"] == "Wyoming Legislature"
@pytest.mark.django_db
def test_people_by_updated_since():
middle_date = Person.objects.all().order_by("updated_at")[2].updated_at
result = schema.execute(
"""{
all: people(updatedSince: "2017-01-01T00:00:00Z", last:50) {
edges { node { name } }
}
some: people(updatedSince: "%s", first:50) {
edges { node { name } }
}
none: people(updatedSince: "2030-01-01T00:00:00Z", first:50) {
edges { node { name } }
}
}"""
% middle_date
)
assert result.errors is None
assert len(result.data["all"]["edges"]) == 9
assert len(result.data["some"]["edges"]) == 7
assert len(result.data["none"]["edges"]) == 0
@pytest.mark.django_db
def test_jurisdiction_fragment(django_assert_num_queries):
with django_assert_num_queries(3):
result = schema.execute(
"""
fragment JurisdictionFields on JurisdictionNode {
id
name
url
legislativeSessions {
edges {
node {
name
startDate
endDate
classification
identifier
}
}
}
}
query jurisdictionsQuery {
jurisdictions {
edges {
node {
...JurisdictionFields
}
}
}
}"""
)
assert result.errors is None
|
the-stack_106_29479 | """Default tags used by the template system, available to all templates."""
from __future__ import unicode_literals
import sys
import re
from datetime import datetime
from itertools import groupby, cycle as itertools_cycle
import warnings
from django.conf import settings
from django.template.base import (Node, NodeList, Template, Context, Library,
TemplateSyntaxError, VariableDoesNotExist, InvalidTemplateLibrary,
BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END,
SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END,
VARIABLE_ATTRIBUTE_SEPARATOR, get_library, token_kwargs, kwarg_re,
render_value_in_context)
from django.template.smartif import IfParser, Literal
from django.template.defaultfilters import date
from django.utils.encoding import smart_text
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils import six
from django.utils import timezone
register = Library()
class AutoEscapeControlNode(Node):
"""Implements the actions of the autoescape tag."""
def __init__(self, setting, nodelist):
self.setting, self.nodelist = setting, nodelist
def render(self, context):
old_setting = context.autoescape
context.autoescape = self.setting
output = self.nodelist.render(context)
context.autoescape = old_setting
if self.setting:
return mark_safe(output)
else:
return output
class CommentNode(Node):
def render(self, context):
return ''
class CsrfTokenNode(Node):
def render(self, context):
csrf_token = context.get('csrf_token', None)
if csrf_token:
if csrf_token == 'NOTPROVIDED':
return format_html("")
else:
return format_html("<input type='hidden' name='csrfmiddlewaretoken' value='{0}' />", csrf_token)
else:
# It's very probable that the token is missing because of
# misconfiguration, so we raise a warning
from django.conf import settings
if settings.DEBUG:
warnings.warn("A {% csrf_token %} was used in a template, but the context did not provide the value. This is usually caused by not using RequestContext.")
return ''
class CycleNode(Node):
def __init__(self, cyclevars, variable_name=None, silent=False, escape=False):
self.cyclevars = cyclevars
self.variable_name = variable_name
self.silent = silent
self.escape = escape # only while the "future" version exists
def render(self, context):
if self not in context.render_context:
# First time the node is rendered in template
context.render_context[self] = itertools_cycle(self.cyclevars)
cycle_iter = context.render_context[self]
value = next(cycle_iter).resolve(context)
if self.variable_name:
context[self.variable_name] = value
if self.silent:
return ''
if not self.escape:
value = mark_safe(value)
return render_value_in_context(value, context)
class DebugNode(Node):
def render(self, context):
from pprint import pformat
output = [pformat(val) for val in context]
output.append('\n\n')
output.append(pformat(sys.modules))
return ''.join(output)
class FilterNode(Node):
def __init__(self, filter_expr, nodelist):
self.filter_expr, self.nodelist = filter_expr, nodelist
def render(self, context):
output = self.nodelist.render(context)
# Apply filters.
context.update({'var': output})
filtered = self.filter_expr.resolve(context)
context.pop()
return filtered
class FirstOfNode(Node):
def __init__(self, variables, escape=False):
self.vars = variables
self.escape = escape # only while the "future" version exists
def render(self, context):
for var in self.vars:
value = var.resolve(context, True)
if value:
if not self.escape:
value = mark_safe(value)
return render_value_in_context(value, context)
return ''
class ForNode(Node):
child_nodelists = ('nodelist_loop', 'nodelist_empty')
def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None):
self.loopvars, self.sequence = loopvars, sequence
self.is_reversed = is_reversed
self.nodelist_loop = nodelist_loop
if nodelist_empty is None:
self.nodelist_empty = NodeList()
else:
self.nodelist_empty = nodelist_empty
def __repr__(self):
reversed_text = ' reversed' if self.is_reversed else ''
return "<For Node: for %s in %s, tail_len: %d%s>" % \
(', '.join(self.loopvars), self.sequence, len(self.nodelist_loop),
reversed_text)
def __iter__(self):
for node in self.nodelist_loop:
yield node
for node in self.nodelist_empty:
yield node
def render(self, context):
if 'forloop' in context:
parentloop = context['forloop']
else:
parentloop = {}
context.push()
try:
values = self.sequence.resolve(context, True)
except VariableDoesNotExist:
values = []
if values is None:
values = []
if not hasattr(values, '__len__'):
values = list(values)
len_values = len(values)
if len_values < 1:
context.pop()
return self.nodelist_empty.render(context)
nodelist = NodeList()
if self.is_reversed:
values = reversed(values)
unpack = len(self.loopvars) > 1
# Create a forloop value in the context. We'll update counters on each
# iteration just below.
loop_dict = context['forloop'] = {'parentloop': parentloop}
for i, item in enumerate(values):
# Shortcuts for current loop iteration number.
loop_dict['counter0'] = i
loop_dict['counter'] = i+1
# Reverse counter iteration numbers.
loop_dict['revcounter'] = len_values - i
loop_dict['revcounter0'] = len_values - i - 1
# Boolean values designating first and last times through loop.
loop_dict['first'] = (i == 0)
loop_dict['last'] = (i == len_values - 1)
pop_context = False
if unpack:
# If there are multiple loop variables, unpack the item into
# them.
try:
unpacked_vars = dict(zip(self.loopvars, item))
except TypeError:
pass
else:
pop_context = True
context.update(unpacked_vars)
else:
context[self.loopvars[0]] = item
# In TEMPLATE_DEBUG mode provide source of the node which
# actually raised the exception
if settings.TEMPLATE_DEBUG:
for node in self.nodelist_loop:
try:
nodelist.append(node.render(context))
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = node.source
raise
else:
for node in self.nodelist_loop:
nodelist.append(node.render(context))
if pop_context:
# The loop variables were pushed on to the context so pop them
# off again. This is necessary because the tag lets the length
# of loopvars differ to the length of each set of items and we
# don't want to leave any vars from the previous loop on the
# context.
context.pop()
context.pop()
return nodelist.render(context)
class IfChangedNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, nodelist_true, nodelist_false, *varlist):
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self._varlist = varlist
def render(self, context):
# Init state storage
state_frame = self._get_context_stack_frame(context)
if self not in state_frame:
state_frame[self] = None
nodelist_true_output = None
try:
if self._varlist:
# Consider multiple parameters. This automatically behaves
# like an OR evaluation of the multiple variables.
compare_to = [var.resolve(context, True) for var in self._varlist]
else:
# The "{% ifchanged %}" syntax (without any variables) compares the rendered output.
compare_to = nodelist_true_output = self.nodelist_true.render(context)
except VariableDoesNotExist:
compare_to = None
if compare_to != state_frame[self]:
state_frame[self] = compare_to
return nodelist_true_output or self.nodelist_true.render(context) # render true block if not already rendered
elif self.nodelist_false:
return self.nodelist_false.render(context)
return ''
def _get_context_stack_frame(self, context):
# The Context object behaves like a stack where each template tag can create a new scope.
# Find the place where to store the state to detect changes.
if 'forloop' in context:
# Ifchanged is bound to the local for loop.
# When there is a loop-in-loop, the state is bound to the inner loop,
# so it resets when the outer loop continues.
return context['forloop']
else:
# Using ifchanged outside loops. Effectively this is a no-op because the state is associated with 'self'.
return context.render_context
class IfEqualNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
self.var1, self.var2 = var1, var2
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.negate = negate
def __repr__(self):
return "<IfEqualNode>"
def render(self, context):
val1 = self.var1.resolve(context, True)
val2 = self.var2.resolve(context, True)
if (self.negate and val1 != val2) or (not self.negate and val1 == val2):
return self.nodelist_true.render(context)
return self.nodelist_false.render(context)
class IfNode(Node):
def __init__(self, conditions_nodelists):
self.conditions_nodelists = conditions_nodelists
def __repr__(self):
return "<IfNode>"
def __iter__(self):
for _, nodelist in self.conditions_nodelists:
for node in nodelist:
yield node
@property
def nodelist(self):
return NodeList(node for _, nodelist in self.conditions_nodelists for node in nodelist)
def render(self, context):
for condition, nodelist in self.conditions_nodelists:
if condition is not None: # if / elif clause
try:
match = condition.eval(context)
except VariableDoesNotExist:
match = None
else: # else clause
match = True
if match:
return nodelist.render(context)
return ''
class RegroupNode(Node):
def __init__(self, target, expression, var_name):
self.target, self.expression = target, expression
self.var_name = var_name
def resolve_expression(self, obj, context):
# This method is called for each object in self.target. See regroup()
# for the reason why we temporarily put the object in the context.
context[self.var_name] = obj
return self.expression.resolve(context, True)
def render(self, context):
obj_list = self.target.resolve(context, True)
        if obj_list is None:
# target variable wasn't found in context; fail silently.
context[self.var_name] = []
return ''
# List of dictionaries in the format:
# {'grouper': 'key', 'list': [list of contents]}.
context[self.var_name] = [
{'grouper': key, 'list': list(val)}
for key, val in
groupby(obj_list, lambda obj: self.resolve_expression(obj, context))
]
return ''
def include_is_allowed(filepath):
for root in settings.ALLOWED_INCLUDE_ROOTS:
if filepath.startswith(root):
return True
return False
class SsiNode(Node):
def __init__(self, filepath, parsed):
self.filepath = filepath
self.parsed = parsed
def render(self, context):
filepath = self.filepath.resolve(context)
if not include_is_allowed(filepath):
if settings.DEBUG:
return "[Didn't have permission to include file]"
else:
return '' # Fail silently for invalid includes.
try:
with open(filepath, 'r') as fp:
output = fp.read()
except IOError:
output = ''
if self.parsed:
try:
t = Template(output, name=filepath)
return t.render(context)
except TemplateSyntaxError as e:
if settings.DEBUG:
return "[Included template had syntax error: %s]" % e
else:
return '' # Fail silently for invalid included templates.
return output
class LoadNode(Node):
def render(self, context):
return ''
class NowNode(Node):
def __init__(self, format_string):
self.format_string = format_string
def render(self, context):
tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None
return date(datetime.now(tz=tzinfo), self.format_string)
class SpacelessNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
from django.utils.html import strip_spaces_between_tags
return strip_spaces_between_tags(self.nodelist.render(context).strip())
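# Editor's note: illustrative usage for the node above, not part of the original
# source. A template fragment such as
#     {% spaceless %}<p> <a href="foo/">Foo</a> </p>{% endspaceless %}
# renders as <p><a href="foo/">Foo</a></p> -- only whitespace *between tags* is
# stripped, not whitespace between a tag and its text content.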
class TemplateTagNode(Node):
mapping = {'openblock': BLOCK_TAG_START,
'closeblock': BLOCK_TAG_END,
'openvariable': VARIABLE_TAG_START,
'closevariable': VARIABLE_TAG_END,
'openbrace': SINGLE_BRACE_START,
'closebrace': SINGLE_BRACE_END,
'opencomment': COMMENT_TAG_START,
'closecomment': COMMENT_TAG_END,
}
def __init__(self, tagtype):
self.tagtype = tagtype
def render(self, context):
return self.mapping.get(self.tagtype, '')
class URLNode(Node):
def __init__(self, view_name, args, kwargs, asvar):
self.view_name = view_name
self.args = args
self.kwargs = kwargs
self.asvar = asvar
def render(self, context):
from django.core.urlresolvers import reverse, NoReverseMatch
args = [arg.resolve(context) for arg in self.args]
kwargs = dict([(smart_text(k, 'ascii'), v.resolve(context))
for k, v in self.kwargs.items()])
view_name = self.view_name.resolve(context)
if not view_name:
raise NoReverseMatch("'url' requires a non-empty first argument. "
"The syntax changed in Django 1.5, see the docs.")
# Try to look up the URL twice: once given the view name, and again
# relative to what we guess is the "main" app. If they both fail,
# re-raise the NoReverseMatch unless we're using the
# {% url ... as var %} construct in which case return nothing.
url = ''
try:
url = reverse(view_name, args=args, kwargs=kwargs, current_app=context.current_app)
except NoReverseMatch:
exc_info = sys.exc_info()
if settings.SETTINGS_MODULE:
project_name = settings.SETTINGS_MODULE.split('.')[0]
try:
url = reverse(project_name + '.' + view_name,
args=args, kwargs=kwargs,
current_app=context.current_app)
except NoReverseMatch:
if self.asvar is None:
# Re-raise the original exception, not the one with
# the path relative to the project. This makes a
# better error message.
six.reraise(*exc_info)
else:
if self.asvar is None:
raise
if self.asvar:
context[self.asvar] = url
return ''
else:
return url
class VerbatimNode(Node):
def __init__(self, content):
self.content = content
def render(self, context):
return self.content
class WidthRatioNode(Node):
def __init__(self, val_expr, max_expr, max_width):
self.val_expr = val_expr
self.max_expr = max_expr
self.max_width = max_width
def render(self, context):
try:
value = self.val_expr.resolve(context)
max_value = self.max_expr.resolve(context)
max_width = int(self.max_width.resolve(context))
except VariableDoesNotExist:
return ''
except (ValueError, TypeError):
raise TemplateSyntaxError("widthratio final argument must be a number")
try:
value = float(value)
max_value = float(max_value)
ratio = (value / max_value) * max_width
except ZeroDivisionError:
return '0'
except (ValueError, TypeError):
return ''
return str(int(round(ratio)))
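# Editor's note: illustrative example for the node above, not part of the original
# source. With this_value=175, max_value=200 and max_width=100,
#     {% widthratio this_value max_value max_width %}
# renders "88", i.e. str(int(round(175 / 200.0 * 100))).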
class WithNode(Node):
def __init__(self, var, name, nodelist, extra_context=None):
self.nodelist = nodelist
# var and name are legacy attributes, being left in case they are used
# by third-party subclasses of this Node.
self.extra_context = extra_context or {}
if name:
self.extra_context[name] = var
def __repr__(self):
return "<WithNode>"
def render(self, context):
values = dict([(key, val.resolve(context)) for key, val in
six.iteritems(self.extra_context)])
context.update(values)
output = self.nodelist.render(context)
context.pop()
return output
@register.tag
def autoescape(parser, token):
"""
Force autoescape behavior for this block.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 2:
raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.")
arg = args[1]
if arg not in ('on', 'off'):
raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'")
nodelist = parser.parse(('endautoescape',))
parser.delete_first_token()
return AutoEscapeControlNode((arg == 'on'), nodelist)
@register.tag
def comment(parser, token):
"""
Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.
"""
parser.skip_past('endcomment')
return CommentNode()
@register.tag
def cycle(parser, token, escape=False):
"""
Cycles among the given strings each time this tag is encountered.
Within a loop, cycles among the given strings each time through
the loop::
{% for o in some_list %}
<tr class="{% cycle 'row1' 'row2' %}">
...
</tr>
{% endfor %}
Outside of a loop, give the values a unique name the first time you call
    it, then use that name each successive time through::
<tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
You can use any number of values, separated by spaces. Commas can also
be used to separate values; if a comma is used, the cycle values are
interpreted as literal strings.
The optional flag "silent" can be used to prevent the cycle declaration
from returning any value::
{% for o in some_list %}
{% cycle 'row1' 'row2' as rowcolors silent %}
<tr class="{{ rowcolors }}">{% include "subtemplate.html " %}</tr>
{% endfor %}
"""
if not escape:
warnings.warn(
"'The `cycle` template tag is changing to escape its arguments; "
"the non-autoescaping version is deprecated. Load it "
"from the `future` tag library to start using the new behavior.",
PendingDeprecationWarning, stacklevel=2)
# Note: This returns the exact same node on each {% cycle name %} call;
# that is, the node object returned from {% cycle a b c as name %} and the
# one returned from {% cycle name %} are the exact same object. This
# shouldn't cause problems (heh), but if it does, now you know.
#
# Ugly hack warning: This stuffs the named template dict into parser so
# that names are only unique within each template (as opposed to using
# a global variable, which would make cycle names have to be unique across
# *all* templates.
args = token.split_contents()
if len(args) < 2:
raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
if ',' in args[1]:
# Backwards compatibility: {% cycle a,b %} or {% cycle a,b as foo %}
# case.
args[1:2] = ['"%s"' % arg for arg in args[1].split(",")]
if len(args) == 2:
# {% cycle foo %} case.
name = args[1]
if not hasattr(parser, '_namedCycleNodes'):
raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name)
if not name in parser._namedCycleNodes:
raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
return parser._namedCycleNodes[name]
as_form = False
if len(args) > 4:
# {% cycle ... as foo [silent] %} case.
if args[-3] == "as":
if args[-1] != "silent":
raise TemplateSyntaxError("Only 'silent' flag is allowed after cycle's name, not '%s'." % args[-1])
as_form = True
silent = True
args = args[:-1]
elif args[-2] == "as":
as_form = True
silent = False
if as_form:
name = args[-1]
values = [parser.compile_filter(arg) for arg in args[1:-2]]
node = CycleNode(values, name, silent=silent, escape=escape)
if not hasattr(parser, '_namedCycleNodes'):
parser._namedCycleNodes = {}
parser._namedCycleNodes[name] = node
else:
values = [parser.compile_filter(arg) for arg in args[1:]]
node = CycleNode(values, escape=escape)
return node
@register.tag
def csrf_token(parser, token):
return CsrfTokenNode()
@register.tag
def debug(parser, token):
"""
Outputs a whole load of debugging information, including the current
context and imported modules.
Sample usage::
<pre>
{% debug %}
</pre>
"""
return DebugNode()
@register.tag('filter')
def do_filter(parser, token):
"""
Filters the contents of the block through variable filters.
Filters can also be piped through each other, and they can have
arguments -- just like in variable syntax.
Sample usage::
{% filter force_escape|lower %}
This text will be HTML-escaped, and will appear in lowercase.
{% endfilter %}
Note that the ``escape`` and ``safe`` filters are not acceptable arguments.
Instead, use the ``autoescape`` tag to manage autoescaping for blocks of
template code.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
_, rest = token.contents.split(None, 1)
filter_expr = parser.compile_filter("var|%s" % (rest))
for func, unused in filter_expr.filters:
if getattr(func, '_decorated_function', func).__name__ in ('escape', 'safe'):
raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % func.__name__)
nodelist = parser.parse(('endfilter',))
parser.delete_first_token()
return FilterNode(filter_expr, nodelist)
@register.tag
def firstof(parser, token, escape=False):
"""
Outputs the first variable passed that is not False, without escaping.
Outputs nothing if all the passed variables are False.
Sample usage::
{% firstof var1 var2 var3 %}
This is equivalent to::
{% if var1 %}
{{ var1|safe }}
{% elif var2 %}
{{ var2|safe }}
{% elif var3 %}
{{ var3|safe }}
{% endif %}
but obviously much cleaner!
You can also use a literal string as a fallback value in case all
passed variables are False::
{% firstof var1 var2 var3 "fallback value" %}
If you want to escape the output, use a filter tag::
{% filter force_escape %}
{% firstof var1 var2 var3 "fallback value" %}
{% endfilter %}
"""
if not escape:
warnings.warn(
"'The `firstof` template tag is changing to escape its arguments; "
"the non-autoescaping version is deprecated. Load it "
"from the `future` tag library to start using the new behavior.",
PendingDeprecationWarning, stacklevel=2)
bits = token.split_contents()[1:]
if len(bits) < 1:
raise TemplateSyntaxError("'firstof' statement requires at least one argument")
return FirstOfNode([parser.compile_filter(bit) for bit in bits], escape=escape)
@register.tag('for')
def do_for(parser, token):
"""
Loops over each item in an array.
For example, to display a list of athletes given ``athlete_list``::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
</ul>
You can loop over a list in reverse by using
``{% for obj in list reversed %}``.
You can also unpack multiple values from a two-dimensional array::
{% for key,value in dict.items %}
{{ key }}: {{ value }}
{% endfor %}
The ``for`` tag can take an optional ``{% empty %}`` clause that will
be displayed if the given array is empty or could not be found::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% empty %}
<li>Sorry, no athletes in this list.</li>
{% endfor %}
        </ul>
The above is equivalent to -- but shorter, cleaner, and possibly faster
than -- the following::
<ul>
          {% if athlete_list %}
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
{% else %}
<li>Sorry, no athletes in this list.</li>
{% endif %}
</ul>
The for loop sets a number of variables available within the loop:
========================== ================================================
Variable Description
========================== ================================================
``forloop.counter`` The current iteration of the loop (1-indexed)
``forloop.counter0`` The current iteration of the loop (0-indexed)
``forloop.revcounter`` The number of iterations from the end of the
loop (1-indexed)
``forloop.revcounter0`` The number of iterations from the end of the
loop (0-indexed)
``forloop.first`` True if this is the first time through the loop
``forloop.last`` True if this is the last time through the loop
``forloop.parentloop`` For nested loops, this is the loop "above" the
current one
========================== ================================================
"""
bits = token.split_contents()
if len(bits) < 4:
raise TemplateSyntaxError("'for' statements should have at least four"
" words: %s" % token.contents)
is_reversed = bits[-1] == 'reversed'
in_index = -3 if is_reversed else -2
if bits[in_index] != 'in':
raise TemplateSyntaxError("'for' statements should use the format"
" 'for x in y': %s" % token.contents)
loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
for var in loopvars:
if not var or ' ' in var:
raise TemplateSyntaxError("'for' tag received an invalid argument:"
" %s" % token.contents)
sequence = parser.compile_filter(bits[in_index+1])
nodelist_loop = parser.parse(('empty', 'endfor',))
token = parser.next_token()
if token.contents == 'empty':
nodelist_empty = parser.parse(('endfor',))
parser.delete_first_token()
else:
nodelist_empty = None
return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)
def do_ifequal(parser, token, negate):
bits = list(token.split_contents())
if len(bits) != 3:
raise TemplateSyntaxError("%r takes two arguments" % bits[0])
end_tag = 'end' + bits[0]
nodelist_true = parser.parse(('else', end_tag))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse((end_tag,))
parser.delete_first_token()
else:
nodelist_false = NodeList()
val1 = parser.compile_filter(bits[1])
val2 = parser.compile_filter(bits[2])
return IfEqualNode(val1, val2, nodelist_true, nodelist_false, negate)
@register.tag
def ifequal(parser, token):
"""
Outputs the contents of the block if the two arguments equal each other.
Examples::
{% ifequal user.id comment.user_id %}
...
{% endifequal %}
{% ifnotequal user.id comment.user_id %}
...
{% else %}
...
{% endifnotequal %}
"""
return do_ifequal(parser, token, False)
@register.tag
def ifnotequal(parser, token):
"""
Outputs the contents of the block if the two arguments are not equal.
See ifequal.
"""
return do_ifequal(parser, token, True)
class TemplateLiteral(Literal):
def __init__(self, value, text):
self.value = value
self.text = text # for better error messages
def display(self):
return self.text
def eval(self, context):
return self.value.resolve(context, ignore_failures=True)
class TemplateIfParser(IfParser):
error_class = TemplateSyntaxError
def __init__(self, parser, *args, **kwargs):
self.template_parser = parser
super(TemplateIfParser, self).__init__(*args, **kwargs)
def create_var(self, value):
return TemplateLiteral(self.template_parser.compile_filter(value), value)
@register.tag('if')
def do_if(parser, token):
"""
The ``{% if %}`` tag evaluates a variable, and if that variable is "true"
(i.e., exists, is not empty, and is not a false boolean value), the
contents of the block are output:
::
{% if athlete_list %}
Number of athletes: {{ athlete_list|count }}
{% elif athlete_in_locker_room_list %}
Athletes should be out of the locker room soon!
{% else %}
No athletes.
{% endif %}
In the above, if ``athlete_list`` is not empty, the number of athletes will
be displayed by the ``{{ athlete_list|count }}`` variable.
As you can see, the ``if`` tag may take one or several `` {% elif %}``
clauses, as well as an ``{% else %}`` clause that will be displayed if all
previous conditions fail. These clauses are optional.
``if`` tags may use ``or``, ``and`` or ``not`` to test a number of
variables or to negate a given variable::
{% if not athlete_list %}
There are no athletes.
{% endif %}
{% if athlete_list or coach_list %}
There are some athletes or some coaches.
{% endif %}
{% if athlete_list and coach_list %}
            Both athletes and coaches are available.
{% endif %}
{% if not athlete_list or coach_list %}
There are no athletes, or there are some coaches.
{% endif %}
{% if athlete_list and not coach_list %}
There are some athletes and absolutely no coaches.
{% endif %}
Comparison operators are also available, and the use of filters is also
allowed, for example::
{% if articles|length >= 5 %}...{% endif %}
Arguments and operators _must_ have a space between them, so
``{% if 1>2 %}`` is not a valid if tag.
All supported operators are: ``or``, ``and``, ``in``, ``not in``
``==`` (or ``=``), ``!=``, ``>``, ``>=``, ``<`` and ``<=``.
Operator precedence follows Python.
"""
# {% if ... %}
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists = [(condition, nodelist)]
token = parser.next_token()
# {% elif ... %} (repeatable)
while token.contents.startswith('elif'):
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists.append((condition, nodelist))
token = parser.next_token()
# {% else %} (optional)
if token.contents == 'else':
nodelist = parser.parse(('endif',))
conditions_nodelists.append((None, nodelist))
token = parser.next_token()
# {% endif %}
assert token.contents == 'endif'
return IfNode(conditions_nodelists)
@register.tag
def ifchanged(parser, token):
"""
Checks if a value has changed from the last iteration of a loop.
The ``{% ifchanged %}`` block tag is used within a loop. It has two
possible uses.
1. Checks its own rendered contents against its previous state and only
displays the content if it has changed. For example, this displays a
list of days, only displaying the month if it changes::
<h1>Archive for {{ year }}</h1>
{% for date in days %}
{% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
<a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a>
{% endfor %}
2. If given one or more variables, check whether any variable has changed.
For example, the following shows the date every time it changes, while
showing the hour if either the hour or the date has changed::
{% for date in days %}
{% ifchanged date.date %} {{ date.date }} {% endifchanged %}
{% ifchanged date.hour date.date %}
{{ date.hour }}
{% endifchanged %}
{% endfor %}
"""
bits = token.split_contents()
nodelist_true = parser.parse(('else', 'endifchanged'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endifchanged',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
values = [parser.compile_filter(bit) for bit in bits[1:]]
return IfChangedNode(nodelist_true, nodelist_false, *values)
@register.tag
def ssi(parser, token):
"""
Outputs the contents of a given file into the page.
Like a simple "include" tag, the ``ssi`` tag includes the contents
of another file -- which must be specified using an absolute path --
in the current page::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" %}
If the optional "parsed" parameter is given, the contents of the included
file are evaluated as template code, with the current context::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" parsed %}
"""
bits = token.split_contents()
parsed = False
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'ssi' tag takes one argument: the path to"
" the file to be included")
if len(bits) == 3:
if bits[2] == 'parsed':
parsed = True
else:
raise TemplateSyntaxError("Second (optional) argument to %s tag"
" must be 'parsed'" % bits[0])
filepath = parser.compile_filter(bits[1])
return SsiNode(filepath, parsed)
@register.tag
def load(parser, token):
"""
Loads a custom template tag set.
For example, to load the template tags in
``django/templatetags/news/photos.py``::
{% load news.photos %}
Can also be used to load an individual tag/filter from
a library::
{% load byline from news %}
"""
# token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) >= 4 and bits[-2] == "from":
try:
taglib = bits[-1]
lib = get_library(taglib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
else:
temp_lib = Library()
for name in bits[1:-2]:
if name in lib.tags:
temp_lib.tags[name] = lib.tags[name]
# a name could be a tag *and* a filter, so check for both
if name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
elif name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
else:
raise TemplateSyntaxError("'%s' is not a valid tag or filter in tag library '%s'" %
(name, taglib))
parser.add_library(temp_lib)
else:
for taglib in bits[1:]:
# add the library to the parser
try:
lib = get_library(taglib)
parser.add_library(lib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
return LoadNode()
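# Parsing sketch for the "from" form handled above (illustrative only):
#     bits = "load byline from news".split()
#     # bits == ['load', 'byline', 'from', 'news']
#     # bits[-1]   -> 'news'       (library to import from)
#     # bits[1:-2] -> ['byline']   (names copied into the temporary Library)
# Only the named tags/filters end up in temp_lib, which is then registered
# with parser.add_library(); the plain "{% load news.photos %}" form instead
# registers the whole library.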
@register.tag
def now(parser, token):
"""
Displays the date, formatted according to the given string.
Uses the same format as PHP's ``date()`` function; see http://php.net/date
for all the possible values.
Sample usage::
It is {% now "jS F Y H:i" %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'now' statement takes one argument")
format_string = bits[1][1:-1]
return NowNode(format_string)
@register.tag
def regroup(parser, token):
"""
Regroups a list of alike objects by a common attribute.
This complex tag is best illustrated by use of an example: say that
``people`` is a list of ``Person`` objects that have ``first_name``,
``last_name``, and ``gender`` attributes, and you'd like to display a list
that looks like:
* Male:
* George Bush
* Bill Clinton
* Female:
* Margaret Thatcher
* Condoleezza Rice
* Unknown:
* Pat Smith
The following snippet of template code would accomplish this dubious task::
{% regroup people by gender as grouped %}
<ul>
{% for group in grouped %}
<li>{{ group.grouper }}
<ul>
{% for item in group.list %}
<li>{{ item }}</li>
{% endfor %}
</ul>
{% endfor %}
</ul>
As you can see, ``{% regroup %}`` populates a variable with a list of
objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the
item that was grouped by; ``list`` contains the list of objects that share
that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female``
and ``Unknown``, and ``list`` is the list of people with those genders.
Note that ``{% regroup %}`` does not work when the list to be grouped is not
sorted by the key you are grouping by! This means that if your list of
people was not sorted by gender, you'd need to make sure it is sorted
before using it, i.e.::
{% regroup people|dictsort:"gender" by gender as grouped %}
"""
bits = token.split_contents()
if len(bits) != 6:
raise TemplateSyntaxError("'regroup' tag takes five arguments")
target = parser.compile_filter(bits[1])
if bits[2] != 'by':
raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
if bits[4] != 'as':
raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must"
" be 'as'")
var_name = bits[5]
# RegroupNode will take each item in 'target', put it in the context under
# 'var_name', evaluate 'var_name'.'expression' in the current context, and
# group by the resulting value. After all items are processed, it will
# save the final result in the context under 'var_name', thus clearing the
# temporary values. This hack is necessary because the template engine
# doesn't provide a context-aware equivalent of Python's getattr.
expression = parser.compile_filter(var_name +
VARIABLE_ATTRIBUTE_SEPARATOR +
bits[3])
return RegroupNode(target, expression, var_name)
@register.tag
def spaceless(parser, token):
"""
Removes whitespace between HTML tags, including tab and newline characters.
Example usage::
{% spaceless %}
<p>
<a href="foo/">Foo</a>
</p>
{% endspaceless %}
This example would return this HTML::
<p><a href="foo/">Foo</a></p>
Only space between *tags* is normalized -- not space between tags and text.
In this example, the space around ``Hello`` won't be stripped::
{% spaceless %}
<strong>
Hello
</strong>
{% endspaceless %}
"""
nodelist = parser.parse(('endspaceless',))
parser.delete_first_token()
return SpacelessNode(nodelist)
@register.tag
def templatetag(parser, token):
"""
Outputs one of the bits used to compose template tags.
Since the template system has no concept of "escaping", to display one of
the bits used in template tags, you must use the ``{% templatetag %}`` tag.
The argument tells which template bit to output:
================== =======
Argument Outputs
================== =======
``openblock`` ``{%``
``closeblock`` ``%}``
``openvariable`` ``{{``
``closevariable`` ``}}``
``openbrace`` ``{``
``closebrace`` ``}``
``opencomment`` ``{#``
``closecomment`` ``#}``
================== =======
"""
# token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'templatetag' statement takes one argument")
tag = bits[1]
if tag not in TemplateTagNode.mapping:
raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
" Must be one of: %s" %
(tag, list(TemplateTagNode.mapping)))
return TemplateTagNode(tag)
@register.tag
def url(parser, token):
"""
Returns an absolute URL matching the given view with its parameters.
This is a way to define links that aren't tied to a particular URL
configuration::
{% url "path.to.some_view" arg1 arg2 %}
or
{% url "path.to.some_view" name1=value1 name2=value2 %}
The first argument is a path to a view. It can be an absolute Python path
or just ``app_name.view_name`` without the project name if the view is
located inside the project.
Other arguments are space-separated values that will be filled in place of
positional and keyword arguments in the URL. Don't mix positional and
keyword arguments.
All arguments for the URL should be present.
For example if you have a view ``app_name.client`` taking client's id and
the corresponding line in a URLconf looks like this::
('^client/(\d+)/$', 'app_name.client')
and this app's URLconf is included into the project's URLconf under some
path::
('^clients/', include('project_name.app_name.urls'))
then in a template you can create a link for a certain client like this::
{% url "app_name.client" client.id %}
The URL will look like ``/clients/client/123/``.
The first argument can also be a named URL instead of the Python path to
the view callable. For example if the URLconf entry looks like this::
url('^client/(\d+)/$', name='client-detail-view')
then in the template you can use::
{% url "client-detail-view" client.id %}
There is even another possible value type for the first argument. It can be
the name of a template variable that will be evaluated to obtain the view
name or the URL name, e.g.::
{% with view_path="app_name.client" %}
{% url view_path client.id %}
{% endwith %}
or,
{% with url_name="client-detail-view" %}
{% url url_name client.id %}
{% endwith %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (path to a view)" % bits[0])
try:
viewname = parser.compile_filter(bits[1])
except TemplateSyntaxError as exc:
exc.args = (exc.args[0] + ". "
"The syntax of 'url' changed in Django 1.5, see the docs."),
raise
args = []
kwargs = {}
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return URLNode(viewname, args, kwargs, asvar)
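# Parsing sketch (illustrative only) for
#     {% url "client-detail-view" client.id page=2 as the_url %}
# bits[1] ("client-detail-view") becomes the compiled viewname, the trailing
# "as the_url" is stripped into asvar, and the remaining bits are matched
# against kwarg_re so that "client.id" lands in args while "page=2" lands in
# kwargs ({"page": <FilterExpression 2>}). URLNode later reverses the URL
# from those pieces and, if asvar is set, stores the result in the context
# instead of outputting it.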
@register.tag
def verbatim(parser, token):
"""
Stops the template engine from rendering the contents of this block tag.
Usage::
{% verbatim %}
{% don't process this %}
{% endverbatim %}
You can also designate a specific closing tag block (allowing the
unrendered use of ``{% endverbatim %}``)::
{% verbatim myblock %}
...
{% endverbatim myblock %}
"""
nodelist = parser.parse(('endverbatim',))
parser.delete_first_token()
return VerbatimNode(nodelist.render(Context()))
@register.tag
def widthratio(parser, token):
"""
For creating bar charts and such, this tag calculates the ratio of a given
value to a maximum value, and then applies that ratio to a constant.
For example::
<img src='bar.gif' height='10' width='{% widthratio this_value max_value max_width %}' />
If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100,
the image in the above example will be 88 pixels wide
(because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88).
"""
bits = token.split_contents()
if len(bits) != 4:
raise TemplateSyntaxError("widthratio takes three arguments")
tag, this_value_expr, max_value_expr, max_width = bits
return WidthRatioNode(parser.compile_filter(this_value_expr),
parser.compile_filter(max_value_expr),
parser.compile_filter(max_width))
@register.tag('with')
def do_with(parser, token):
"""
Adds one or more values to the context (inside of this block) for caching
and easy access.
For example::
{% with total=person.some_sql_method %}
{{ total }} object{{ total|pluralize }}
{% endwith %}
Multiple values can be added to the context::
{% with foo=1 bar=2 %}
...
{% endwith %}
The legacy format of ``{% with person.some_sql_method as total %}`` is
still accepted.
"""
bits = token.split_contents()
remaining_bits = bits[1:]
extra_context = token_kwargs(remaining_bits, parser, support_legacy=True)
if not extra_context:
raise TemplateSyntaxError("%r expected at least one variable "
"assignment" % bits[0])
if remaining_bits:
raise TemplateSyntaxError("%r received an invalid token: %r" %
(bits[0], remaining_bits[0]))
nodelist = parser.parse(('endwith',))
parser.delete_first_token()
return WithNode(None, None, nodelist, extra_context=extra_context)
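# Illustrative example of what token_kwargs() produces for the tag above
# (not an additional code path): for
#     {% with total=business.employees.count alias=company.name %}
# extra_context becomes roughly
#     {"total": <FilterExpression business.employees.count>,
#      "alias": <FilterExpression company.name>}
# and WithNode pushes the resolved values onto the context while rendering
# the enclosed nodelist.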
|
the-stack_106_29480 | #!/usr/bin/env python3
"""
Test suite for remote module
"""
import pytest
import requests_mock
from pathlib import Path
from cblaster import remote, helpers
TEST_DIR = Path(__file__).resolve().parent
def test_start_no_input():
with pytest.raises(ValueError):
# No query_file/ids
remote.start()
@pytest.fixture()
def start_response():
return (TEST_DIR / "start_response.html").read_text()
def test_start(start_response, monkeypatch):
def mock_sequences(query_file, query_ids):
return {'seq1': 'TEST', 'seq2': 'TEST'}
monkeypatch.setattr(helpers, "get_sequences", mock_sequences)
with requests_mock.Mocker() as mock:
mock.post(remote.BLAST_API_URL, text=start_response)
# Ensure RID/RTOE is returned
result = remote.start(
query_ids=["seq1", "seq2"],
entrez_query="Aspergillus[ORGN]"
)
assert result == ("VCZM3MWB014", 18)
# Check correct request URL
assert mock.request_history[0].url == (
"https://blast.ncbi.nlm.nih.gov/Blast.cgi?"
"CMD=PUT"
"&DATABASE=nr"
"&PROGRAM=blastp"
"&FILTER=F"
"&EXPECT=10"
"&GAPCOSTS=11+1"
"&MATRIX=BLOSUM62"
"&HITLIST_SIZE=5000"
"&ALIGNMENTS=5000"
"&DESCRIPTIONS=5000"
"&WORD_SIZE=6"
"&COMPOSITION_BASED_STATISTICS=2"
"&ENTREZ_QUERY=Aspergillus%5BORGN%5D"
"&THRESHOLD=11"
)
def test_start_blastn_options(start_response, monkeypatch):
def mock_sequences(query_file, query_ids):
return {'seq1': 'TEST', 'seq2': 'TEST'}
monkeypatch.setattr(helpers, "get_sequences", mock_sequences)
with requests_mock.Mocker() as mock:
mock.post(remote.BLAST_API_URL, text=start_response)
# megablast, nucl_* are blastn options, threshold is only BLASTp
remote.start(
query_ids=["seq1"],
program="blastn",
megablast=True,
nucl_penalty=99,
nucl_reward=99,
threshold=99,
)
# Check correct request URL
request = mock.request_history[0]
assert "THRESHOLD" not in request.url # Only blastp
assert all(
part in request.url
for part in ["NUCL_PENALTY=99", "NUCL_REWARD=99", "MEGABLAST=on"]
)
@pytest.fixture()
def check_response():
return (TEST_DIR / "check_response.html").read_text()
def test_check(check_response):
with requests_mock.Mocker() as mock:
mock.get(remote.BLAST_API_URL, text=check_response)
# Finds Status=READY and ThereAreHits=yes
assert remote.check("VCZM3MWB014") is True
# Check correct request URL
assert mock.request_history[0].url == (
"https://blast.ncbi.nlm.nih.gov/Blast.cgi?"
"CMD=Get"
"&RID=VCZM3MWB014"
"&FORMAT_OBJECT=SearchInfo"
)
@pytest.mark.parametrize(
"text", ["Status=UNKNOWN\n", "Status=FAILED\n", "Status=READY\nThereAreHits=no\n"]
)
def test_check_failed(text):
with requests_mock.Mocker() as mock, pytest.raises(ValueError):
mock.get(remote.BLAST_API_URL, text=text)
remote.check("RID")
def test_check_waiting():
with requests_mock.Mocker() as mock:
mock.get(remote.BLAST_API_URL, text="Status=WAITING\n")
assert remote.check("RID") is False
@pytest.fixture()
def retrieve_response():
return (TEST_DIR / "retrieve_response.html").read_text()
def test_retrieve(retrieve_response):
with requests_mock.Mocker() as mock:
mock.get(remote.BLAST_API_URL, text=retrieve_response)
result = remote.retrieve("RID")
# Make sure we've removed non-TSV cruft
assert len(result) == 300
assert not any(row.startswith(("#", "<", " ", "Qblast", "-")) for row in result)
assert mock.request_history[0].url == (
"https://blast.ncbi.nlm.nih.gov/Blast.cgi?"
"CMD=Get"
"&RID=RID"
"&FORMAT_TYPE=Tabular"
"&FORMAT_OBJECT=Alignment"
"&HITLIST_SIZE=5000"
"&ALIGNMENTS=5000"
"&DESCRIPTIONS=5000"
"&NCBI_GI=F"
)
def test_poll_success(monkeypatch):
def patch_check(rid):
return True
monkeypatch.setattr(remote, "check", patch_check)
assert remote.check("RID") is True
def test_poll_retry_limit(monkeypatch):
def returns_false(rid):
return False
monkeypatch.setattr(remote, "check", returns_false)
with pytest.raises(ValueError):
remote.poll("RID", delay=0, max_retries=2)
@pytest.fixture
def query_file():
return TEST_DIR / "test.faa"
def test_parse_empty_handle(query_file):
with pytest.raises(ValueError):
remote.parse([], query_file=query_file)
def test_parse(query_file):
# length of QBE85648 == 179
result = [
# qid sid pid len mismatch gapopen qstart qend sstart ssend evalue bitscore
"QBE85648.1\tHIT1\t100.000\t179\t0\t0\t1\t179\t1\t179\t1.38e-127\t365\t100.00",
"QBE85648.1\tHIT2\t20.000\t179\t0\t0\t1\t179\t1\t179\t1.38e-127\t365\t100.00",
"QBE85648.1\tHIT3\t100.000\t179\t0\t0\t150\t179\t1\t179\t1.38e-127\t365\t100.00",
"QBE85648.1\tHIT4\t100.000\t179\t0\t0\t1\t179\t1\t179\t0.011\t365\t100.00",
]
hits = remote.parse(result, query_file=query_file)
# Default thresholds are 30% identity, 50% coverage, 0.01 evalue
# so only the first hit should be saved
assert len(hits) == 1
assert hits[0].query == "QBE85648.1"
assert hits[0].subject == "HIT1"
assert hits[0].identity == 100.0
assert hits[0].coverage == 100.0
assert hits[0].bitscore == 365.0
assert hits[0].evalue == 1.38e-127
|
the-stack_106_29481 | import re
import os
import math
import logging
logger = logging.getLogger(__name__)
import numpy as np
from scipy.ndimage.filters import median_filter
import scipy.interpolate as intp
import scipy.signal as sg
import scipy.optimize as opt
import astropy.io.fits as fits
from astropy.table import Table
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmap
import matplotlib.ticker as tck
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from ..echelle.trace import ApertureSet
from ..utils.onedarray import get_local_minima, get_edge_bin
from ..utils.regression import get_clip_mean
from ..utils.regression2d import polyfit2d, polyval2d
from .imageproc import table_to_array, array_to_table
def find_background2(data, mask, channels, apertureset_lst,
method='poly', scale='linear', scan_step=200,
xorder=2, yorder=2, maxiter=5, upper_clip=3, lower_clip=3,
extend=True, display=True, fig_file=None, reg_file=None):
"""Subtract the background for an input FITS image.
Args:
data (:class:`numpy.ndarray`): Input data image.
mask (:class:`numpy.ndarray`): Mask of input data image.
channels (list): List of channels as strings.
apertureset_lst (dict): Dict of :class:`~edrs.echelle.trace.ApertureSet`
at different channels.
method (str): Method of finding background light.
scale (str): Scale of the 2-D polynomial fitting. If 'log', fit the
polynomial in logarithmic scale.
scan_step (int): Steps of scan in pixels.
xorder (int): Order of 2D polynomial along the main dispersion
direction (only applicable if **method** = "poly").
yorder (int): Order of 2D polynomial along the cross-dispersion
direction (only applicable if **method** = "poly").
maxiter (int): Maximum number of iteration of 2D polynomial fitting
(only applicable if **method** = "poly").
upper_clip (float): Upper sigma clipping threshold (only applicable if
**method** = "poly").
lower_clip (float): Lower sigma clipping threshold (only applicable if
**method** = "poly").
extend (bool): Extend the grid to the whole CCD image if *True*.
display (bool): Display figures on the screen if *True*.
fig_file (str): Name of the output figure. No image file generated if
*None*.
reg_file (str): Name of the output DS9 region file. No file generated
if *None*.
Returns:
:class:`numpy.ndarray`: Image of background light.
"""
plot = (display or fig_file is not None)
plot_paper_fig = False
h, w = data.shape
meddata = median_filter(data, size=(3,3), mode='reflect')
xnodes, ynodes, znodes = [], [], []
# find the minimum and maximum aperture number
min_aper = min([min(apertureset_lst[ch].keys()) for ch in channels])
max_aper = max([max(apertureset_lst[ch].keys()) for ch in channels])
# generate the horizontal scan list
x_lst = np.arange(0, w-1, scan_step)
# add the last column to the list
if x_lst[-1] != w-1:
x_lst = np.append(x_lst, w-1)
# find intra-order pixels
_message_lst = ['Column, N (between), N (extend), N (removed), N (total)']
for x in x_lst:
xsection = meddata[:,x]
inter_aper = []
prev_newy = None
# loop for every aperture
for aper in range(min_aper, max_aper+1):
# for a new aperture, initialize the count of channel
count_channel = 0
for ich, channel in enumerate(channels):
# check every channel in this frame
if aper in apertureset_lst[channel]:
count_channel += 1
this_newy = apertureset_lst[channel][aper].position(x)
if count_channel == 1 and prev_newy is not None:
# this channel is the first channel in this aperture and
# there is a previous y
mid_newy = (prev_newy + this_newy)//2
i1 = min(h-1, max(0, int(prev_newy)))
i2 = min(h-1, max(0, int(this_newy)))
#if len(inter_aper)==0 or \
# abs(mid_newy - inter_aper[-1])>scan_step*0.7:
# if i2-i1>0:
if i2-i1>0:
mid_newy = i1 + xsection[i1:i2].argmin()
inter_aper.append(mid_newy)
prev_newy = this_newy
inter_aper = np.array(inter_aper)
# count how many nodes found between detected orders
n_nodes_inter = inter_aper.size
# if extend = True, expand the grid with polynomial fitting to
# cover the whole CCD area
n_nodes_extend = 0
if extend:
if x==2304:
_fig = plt.figure(dpi=150)
_ax = _fig.gca()
for _x in inter_aper:
_ax.axvline(x=_x,color='g', ls='--',lw=0.5, alpha=0.6)
_ax.plot(data[:, x],'b-',lw=0.5)
_fig2 = plt.figure(dpi=150)
_ax2 = _fig2.gca()
print(inter_aper)
coeff = np.polyfit(np.arange(inter_aper.size), inter_aper, deg=3)
if x== 2304:
_ax2.plot(np.arange(inter_aper.size), inter_aper,'go', alpha=0.6)
_newx = np.arange(0, inter_aper.size, 0.1)
_ax2.plot(_newx, np.polyval(coeff, _newx),'g-')
# find the points after the end of inter_aper
ii = inter_aper.size-1
new_y = inter_aper[-1]
while(new_y<h-1):
ii += 1
new_y = int(np.polyval(coeff,ii))
inter_aper = np.append(inter_aper,new_y)
n_nodes_extend += 1
# find the points before the beginning of order_mid
ii = 0
new_y = inter_aper[0]
while(new_y>0):
ii -= 1
new_y = int(np.polyval(coeff,ii))
inter_aper = np.insert(inter_aper,0,new_y)
n_nodes_extend += 1
if x==2304:
#for _x in np.polyval(coeff, np.arange(0,25)):
# _ax.axvline(x=_x, color='r',ls='--',lw=0.5)
#_newx = np.arange(0, 25)
#_ax2.plot(_newx, np.polyval(coeff, _newx), 'ro', alpha=0.6)
plt.show()
# remove those points with y<0 or y>h-1
m1 = inter_aper > 0
m2 = inter_aper < h-1
inter_aper = inter_aper[np.nonzero(m1*m2)[0]]
# filter those masked pixels
m = mask[inter_aper, x]==0
inter_aper = inter_aper[m]
# remove backward points
tmp = np.insert(inter_aper,0,0.)
m = np.diff(tmp)>0
inter_aper = inter_aper[np.nonzero(m)[0]]
# count how many nodes removed
n_nodes_removed = (n_nodes_inter + n_nodes_extend) - inter_aper.size
# pack infos into message list
_message_lst.append('| %6d | %6d | %6d | %6d | %6d |'%(
x, n_nodes_inter, n_nodes_extend, n_nodes_removed, inter_aper.size))
# pack all nodes
for y in inter_aper:
xnodes.append(x)
ynodes.append(y)
znodes.append(meddata[y,x])
# extrapolate
#if extrapolate:
if False:
_y0, _y1 = inter_aper[0], inter_aper[1]
newy = _y0 - (_y1 - _y0)
newz = meddata[_y0, x] - (meddata[_y1, x] - meddata[_y0, x])
xnodes.append(x)
ynodes.append(newy)
znodes.append(newz)
_y1, _y2 = inter_aper[-2], inter_aper[-1]
newy = _y2 + (_y2 - _y1)
newz = meddata[_y2, x] + (meddata[_y2, x] - meddata[_y1, x])
xnodes.append(x)
ynodes.append(newy)
znodes.append(newz)
# convert to numpy array
xnodes = np.array(xnodes)
ynodes = np.array(ynodes)
znodes = np.array(znodes)
# write to running log
_message_lst.append('Total: %4d'%xnodes.size)
logger.info((os.linesep+' ').join(_message_lst))
# if scale='log', filter the negative values
if scale=='log':
pmask = znodes > 0
znodes[~pmask] = znodes[pmask].min()
znodes = np.log10(znodes)
if plot:
# initialize figures
fig = plt.figure(figsize=(10,10), dpi=150)
ax11 = fig.add_axes([0.07, 0.54, 0.39, 0.39])
ax12 = fig.add_axes([0.52, 0.54, 0.39, 0.39])
ax13 = fig.add_axes([0.94, 0.54, 0.015, 0.39])
ax21 = fig.add_axes([0.07, 0.07, 0.39, 0.39], projection='3d')
ax22 = fig.add_axes([0.52, 0.07, 0.39, 0.39], projection='3d')
fig.suptitle('Background')
ax11.imshow(data, cmap='gray')
# plot nodes
for ax in [ax11, ax12]:
ax.set_xlim(0,w-1)
ax.set_ylim(h-1,0)
ax.set_xlabel('X (pixel)', fontsize=10)
ax.set_ylabel('Y (pixel)', fontsize=10)
for ax in [ax21, ax22]:
ax.set_xlim(0,w-1)
ax.set_ylim(0,h-1)
ax.set_xlabel('X (pixel)', fontsize=10)
ax.set_ylabel('Y (pixel)', fontsize=10)
for ax in [ax11, ax12]:
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for ax in [ax21, ax22]:
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for tick in ax.zaxis.get_major_ticks():
tick.label1.set_fontsize(9)
if display:
plt.show(block=False)
# plot the figure used in paper
if plot_paper_fig:
figp1 = plt.figure(figsize=(6,6), dpi=150)
axp1 = figp1.add_axes([0.00, 0.05, 1.00, 0.95], projection='3d')
figp2 = plt.figure(figsize=(6.5,6), dpi=150)
axp2 = figp2.add_axes([0.12, 0.1, 0.84, 0.86])
if method=='poly':
background_data, fitmask = fit_background(data.shape,
xnodes, ynodes, znodes, xorder=xorder, yorder=yorder,
maxiter=maxiter, upper_clip=upper_clip, lower_clip=lower_clip)
elif method=='interp':
background_data, fitmask = interpolate_background(data.shape,
xnodes, ynodes, znodes)
else:
raise ValueError('Unknown method: %s'%method)
m = (ynodes >= 0)*(ynodes <= h-1)
xnodes = xnodes[m]
ynodes = ynodes[m]
znodes = znodes[m]
fitmask = fitmask[m]
if scale=='log':
background_data = np.power(10, background_data)
# save nodes to DS9 region file
if reg_file is not None:
outfile = open(reg_file, 'w')
outfile.write('# Region file format: DS9 version 4.1'+os.linesep)
outfile.write('global color=green dashlist=8 3 width=1 ')
outfile.write('font="helvetica 10 normal roman" select=1 highlite=1 ')
outfile.write('dash=0 fixed=0 edit=1 move=1 delete=1 include=1 ')
outfile.write('source=1'+os.linesep)
outfile.write('physical'+os.linesep)
for x, y in zip(xnodes, ynodes):
text = ('point(%4d %4d) # point=circle'%(x+1, y+1))
outfile.write(text+os.linesep)
outfile.close()
# write nodes to running log
message = ['Background Nodes:', ' x, y, value, mask']
for x,y,z,m in zip(xnodes, ynodes, znodes, fitmask):
message.append('| %4d | %4d | %+10.8e | %1d |'%(x,y,z,m))
logger.info((os.linesep+' '*4).join(message))
residual = znodes - background_data[ynodes, xnodes]
if plot:
# prepare for plotting the fitted surface with a loose grid
yy, xx = np.meshgrid(np.linspace(0,h-1,32), np.linspace(0,w-1,32))
yy = np.int16(np.round(yy))
xx = np.int16(np.round(xx))
zz = background_data[yy, xx]
# plot 2d fitting in a 3-D axis in fig2
# plot the linear fitting
ax21.set_title('Background fitting (%s Z)'%scale, fontsize=10)
ax22.set_title('residuals (%s Z)'%scale, fontsize=10)
ax21.plot_surface(xx, yy, zz, rstride=1, cstride=1, cmap='jet',
linewidth=0, antialiased=True, alpha=0.5)
ax21.scatter(xnodes[fitmask], ynodes[fitmask], znodes[fitmask],
color='C0', linewidth=0)
ax22.scatter(xnodes[fitmask], ynodes[fitmask], residual[fitmask],
color='C0', linewidth=0)
if (~fitmask).sum()>0:
ax21.scatter(xnodes[~fitmask], ynodes[~fitmask], znodes[~fitmask],
color='none', edgecolor='C0', linewidth=1)
ax22.scatter(xnodes[~fitmask], ynodes[~fitmask], residual[~fitmask],
color='none', edgecolor='C0', linewidth=1)
# plot the logarithmic fitting in another figure
#if scale=='log':
# ax23.plot_surface(xx, yy, log_zz, rstride=1, cstride=1, cmap='jet',
# linewidth=0, antialiased=True, alpha=0.5)
# ax23.scatter(xnodes[fitmask], ynodes[fitmask], zfit[fitmask], linewidth=0)
# ax24.scatter(xnodes[fitmask], ynodes[fitmask], log_residual[fitmask], linewidth=0)
for ax in [ax21, ax22]:
ax.xaxis.set_major_locator(tck.MultipleLocator(500))
ax.xaxis.set_minor_locator(tck.MultipleLocator(100))
ax.yaxis.set_major_locator(tck.MultipleLocator(500))
ax.yaxis.set_minor_locator(tck.MultipleLocator(100))
if display: fig.canvas.draw()
# plot figure for paper
if plot_paper_fig:
axp1.plot_surface(xx, yy, zz, rstride=1, cstride=1, cmap='jet',
linewidth=0, antialiased=True, alpha=0.5)
axp1.scatter(xnodes[fitmask], ynodes[fitmask], znodes[fitmask], linewidth=0)
axp1.xaxis.set_major_locator(tck.MultipleLocator(500))
axp1.xaxis.set_minor_locator(tck.MultipleLocator(100))
axp1.yaxis.set_major_locator(tck.MultipleLocator(500))
axp1.yaxis.set_minor_locator(tck.MultipleLocator(100))
axp1.set_xlim(0, w-1)
axp1.set_ylim(0, h-1)
axp1.set_xlabel('X')
axp1.set_ylabel('Y')
axp1.set_zlabel('Count')
if plot:
# plot the accepted nodes in subfig 1
ax11.scatter(xnodes[fitmask], ynodes[fitmask],
c='r', s=6, linewidth=0, alpha=0.8)
# plot the rejected nodes
if (~fitmask).sum()>0:
ax11.scatter(xnodes[~fitmask], ynodes[~fitmask],
c='none', s=6, edgecolor='r', linewidth=0.5)
# plot subfig 2
cnorm = colors.Normalize(vmin = background_data.min(),
vmax = background_data.max())
scalarmap = cmap.ScalarMappable(norm=cnorm, cmap=cmap.jet)
# plot the background light
image = ax12.imshow(background_data, cmap=scalarmap.get_cmap())
# plot the accepted nodes
ax12.scatter(xnodes[fitmask], ynodes[fitmask],
c='k', s=6, linewidth=0.5)
# plot the rejected nodes
if (~fitmask).sum()>0:
ax12.scatter(xnodes[~fitmask], ynodes[~fitmask],
c='none', s=6, edgecolor='k', linewidth=0.5)
# set colorbar
plt.colorbar(image, cax=ax13)
# set font size of colorbar
for tick in ax13.get_yaxis().get_major_ticks():
tick.label2.set_fontsize(9)
if display: fig.canvas.draw()
# plot for figure in paper
if plot_paper_fig:
pmask = data>0
logdata = np.zeros_like(data)-1
logdata[pmask] = np.log(data[pmask])
axp2.imshow(logdata, cmap='gray')
axp2.scatter(xnodes, ynodes, c='b', s=8, linewidth=0, alpha=0.8)
cs = axp2.contour(background_data, linewidth=1, cmap='jet')
axp2.clabel(cs, inline=1, fontsize=11, fmt='%d', use_clabeltext=True)
axp2.set_xlim(0, w-1)
axp2.set_ylim(h-1, 0)
axp2.set_xlabel('X')
axp2.set_ylabel('Y')
figp1.savefig('fig_background1.png')
figp2.savefig('fig_background2.png')
figp1.savefig('fig_background1.pdf')
figp2.savefig('fig_background2.pdf')
plt.close(figp1)
plt.close(figp2)
if fig_file is not None:
fig.savefig(fig_file)
plt.close(fig)
return background_data
def fit_background(shape, xnodes, ynodes, znodes, xorder=2, yorder=2,
maxiter=5, upper_clip=3, lower_clip=3):
"""Find the background light by fitting a 2D polynomial.
Args:
shape (tuple): Shape of image.
xnodes (:class:`numpy.ndarray`): List of X coordinates of the nodes.
ynodes (:class:`numpy.ndarray`): List of Y coordinates of the nodes.
znodes (:class:`numpy.ndarray`): List of pixel values of the nodes.
xorder (int): Order of 2D polynomial along the main dispersion
direction.
yorder (int): Order of 2D polynomial along the cross-dispersion
direction.
maxiter (int): Maximum number of iteration of 2D polynomial fitting.
upper_clip (float): Upper sigma clipping threshold.
lower_clip (float): Lower sigma clipping threshold.
Returns:
tuple: A tuple containing:
* **background_data** (:class:`numpy.ndarray`) – Array of background
light.
* **mask** (:class:`numpy.ndarray`) – Mask of used nodes in the
fitting.
See also:
:func:`interpolate_background`
"""
h, w = shape
# normalize to 0 ~ 1 for x and y nodes
xfit = np.float64(xnodes)/w
yfit = np.float64(ynodes)/h
zfit = znodes
# fit the 2-d polynomial
_messages = [
'Polynomial Background Fitting Xorder=%d, Yorder=%d:'%(xorder, yorder)
]
mask = np.ones_like(zfit, dtype=bool)
for niter in range(maxiter):
coeff = polyfit2d(xfit[mask], yfit[mask], zfit[mask],
xorder=xorder, yorder=yorder)
values = polyval2d(xfit, yfit, coeff)
residuals = zfit - values
sigma = residuals[mask].std(dtype=np.float64)
m1 = residuals < upper_clip*sigma
m2 = residuals > -lower_clip*sigma
new_mask = m1*m2
# write info to running log
_message = 'Iter. %d: std=%10.6f, N=%4d, N(new)=%4d'%(
niter, sigma, mask.sum(), new_mask.sum())
_messages.append(_message)
if new_mask.sum() == mask.sum():
break
mask = new_mask
logger.debug((os.linesep+' '*4).join(_messages))
yy, xx = np.mgrid[:h:, :w:]
background_data = polyval2d(xx/w, yy/h, coeff)
return background_data, mask
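# Minimal usage sketch for fit_background() (illustrative; real node lists
# come from find_background2() above). Kept as a comment so importing this
# module stays side-effect free:
#     import numpy as np
#     ny, nx = 100, 120
#     yy, xx = np.mgrid[:ny, :nx]
#     true_bkg = 50. + 0.02*xx + 0.05*yy
#     xnodes = np.random.randint(0, nx, 200)
#     ynodes = np.random.randint(0, ny, 200)
#     znodes = true_bkg[ynodes, xnodes] + np.random.normal(0, 1, 200)
#     bkg, used = fit_background((ny, nx), xnodes, ynodes, znodes,
#                                xorder=2, yorder=2)
#     # bkg is an (ny, nx) background image; used flags the nodes that
#     # survived the sigma clipping.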
def interpolate_background(shape, xnodes, ynodes, znodes):
"""Find the background light by interpolating 2D cubic splines.
Args:
shape (tuple): Shape of image.
xnodes (:class:`numpy.ndarray`): List of X coordinates of the nodes.
ynodes (:class:`numpy.ndarray`): List of Y coordinates of the nodes.
znodes (:class:`numpy.ndarray`): List of pixel values of the nodes.
Returns:
tuple: A tuple containing:
* **background_data** (:class:`numpy.ndarray`) – Array of background
light.
* **mask** (:class:`numpy.ndarray`) – Mask of used nodes in the
fitting.
See also:
:func:`fit_background`
"""
h, w = shape
yy, xx = np.mgrid[:h:, :w:]
background_data = intp.griddata((xnodes, ynodes), znodes, (xx, yy),
rescale=True, method='cubic')
mask = np.ones_like(znodes, dtype=bool)
# fix non values
notnan_mask = ~np.isnan(background_data)
for j in np.arange(w):
array = background_data[:, j]
m = notnan_mask[:, j]
notnan_index = np.nonzero(m)[0]
i1 = notnan_index[0]
if i1 > 0:
background_data[0:i1, j] = array[i1]
i2 = notnan_index[-1]
if i2 < h-1:
background_data[i2+1:, j] = array[i2]
return background_data, mask
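# Usage is analogous to fit_background() (illustrative):
#     bkg, used = interpolate_background((ny, nx), xnodes, ynodes, znodes)
# Unlike the polynomial fit, every node is kept (the returned mask is all
# True), and pixels outside the convex hull of the nodes are padded column
# by column with the nearest valid interpolated value.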
def find_background(data, mask, aperturesets, ncols, distance,
yorder=7, ymaxiter=5, yupper_clip=3, ylower_clip=3,
fig_stray=None, fig_section=None):
"""Subtract the background for an input FITS image.
Args:
data (:class:`numpy.ndarray`): Input data image.
mask (:class:`numpy.ndarray`): Mask of input data image.
aperturesets (:class:`~gamse.echelle.trace.ApertureSet` or dict):
A :class:`~gamse.echelle.trace.ApertureSet` instance, or a dict of
:class:`~gamse.echelle.trace.ApertureSet` at different channels.
ncols (int): Number of columns sampled along the main-dispersion axis.
distance (float): Minimum distance (in pixels) to the order centers for
a pixel to be treated as inter-order background.
yorder (int): Order of polynomial along the cross-dispersion
direction.
fig_stray (str): Name of the figure showing stray light. No file
generated if *None*.
fig_section (str): Name of the figure showing cross-sections. No file
generated if *None*.
Returns:
:class:`numpy.ndarray`: Image of background light. It has the same shape
and datatype as the arg **data**.
"""
h, w = data.shape
cols = np.int32(np.round(np.linspace(1, w-2, ncols)))
# prepare for cross-section figure
plot_section = (fig_section is not None)
if plot_section:
plot_cols = [cols[np.abs(cols - (w-1)*t).argmin()]
for t in np.linspace(0, 1, 5)]
fig1 = plt.figure(figsize=(18,12), dpi=150)
tick_size = 13
label_size = 14
ally = np.arange(h)
# prepare interpolation grid
grid = []
# parse apertureset_lst
if isinstance(aperturesets, ApertureSet):
# put input aperture in a dict
apertureset_lst = {'A': aperturesets}
elif isinstance(aperturesets, dict):
apertureset_lst = aperturesets
else:
print('Unknown aperturesets:',aperturesets)
exit()
for x in cols:
xsection = np.median(data[:,x-1:x+2], axis=1)
intermask = np.ones(h, dtype=bool)
allimin, _ = get_local_minima(xsection)
allmin = np.array([v in allimin for v in np.arange(h)])
if plot_section and x in plot_cols:
i = plot_cols.index(x)
ax1 = fig1.add_axes([0.05, (4-i)*0.19+0.05, 0.93, 0.18])
#ax2 = ax1.twinx()
ax1.plot(xsection, ls='-' ,color='C0', lw=0.5, alpha=0.2)
for ichannel, (channel, apertureset) in enumerate(sorted(apertureset_lst.items())):
# predict post- and pre-apertures.
# post- and pre-apertures are virtual apertures that are not
# identified by the order detection function, probably because they
# are too weak
y_lst, aper_lst = [], []
apercen_lst = []
for aper, aperloc in apertureset.items():
y = aperloc.position(x)
apercen_lst.append(y)
y_lst.append(y)
aper_lst.append(aper)
y_lst = np.array(y_lst)
aper_lst = np.array(aper_lst)
apercen_lst = np.array(apercen_lst)
coeff = np.polyfit(aper_lst, y_lst, deg=3)
# find post apertures
aper = aper_lst[-1]
post_aper_lst, post_apercen_lst = [], []
while(True):
aper += 1
y = np.polyval(coeff, aper)
if 0 < y < h-1:
post_aper_lst.append(aper)
post_apercen_lst.append(y)
else:
break
post_aper_lst = np.array(post_aper_lst)
post_apercen_lst = np.array(post_apercen_lst)
# find pre apertures
aper = aper_lst[0]
pre_aper_lst, pre_apercen_lst = [], []
while(True):
aper -= 1
y = np.polyval(coeff, aper)
if 0 < y < h-1:
pre_aper_lst.append(aper)
pre_apercen_lst.append(y)
else:
break
pre_aper_lst = np.array(pre_aper_lst)
pre_apercen_lst = np.array(pre_apercen_lst)
'''
# plot aper_lst, pre-aperture list, and post-aperture list
if plot_section and x in plot_cols:
_color = 'C%d'%ichannel
ax2.plot(y_lst, aper_lst, 'o',
color=_color, ms=3, alpha=0.5)
ax2.plot(post_apercen_lst, post_aper_lst, '^',
color=_color, ms=3, alpha=0.5)
ax2.plot(pre_apercen_lst, pre_aper_lst, 'v',
color=_color, ms=3, alpha=0.5)
_newx = np.arange(aper_lst[0], aper_lst[-1], 0.1)
ax2.plot(np.polyval(coeff, _newx), _newx, '-',
color=_color, lw=1, alpha=0.5)
'''
for y in np.concatenate((apercen_lst, post_apercen_lst, pre_apercen_lst)):
mask = np.abs(ally - y) > distance
intermask *= mask
if plot_section and x in plot_cols:
# plot order center using vertical lines
ax1.axvline(x=y, color='C%d'%ichannel,
ls='--', lw=0.5, alpha=0.3)
if plot_section and x in plot_cols:
_yplot = np.copy(xsection)
_yplot[~intermask] = np.nan
ax1.plot(_yplot, '-', color='C0', lw=0.7, alpha=0.5)
notnanindex = np.nonzero(intermask)[0]
group_lst = np.split(notnanindex, np.where(np.diff(notnanindex)!=1)[0]+1)
fitx_lst, fity_lst, fityerr_lst = [], [], []
for group in group_lst:
ydata = xsection[group]
local_min = allmin[group]
idx = np.nonzero(local_min)[0]
if idx.size == 0:
continue
if idx.size == 1:
meanx = ydata.argmin() + group[0]
mean, std = ydata.min(),0
else:
i1, i2 = idx[0], idx[-1]+1
mean, std, m = get_clip_mean(ydata[i1:i2], high=2, low=3, maxiter=10)
meanx = np.arange(i1, i2)[m].mean() + group[0]
if mean > 0 and std/math.sqrt(mean) > 2 and (i2-i1) >= 5:
# remove the two largest points
m = ydata[i1:i2].argsort().argsort() < i2-i1-2
mean, std, m = get_clip_mean(ydata[i1:i2], mask=m, high=2, low=3, maxiter=10)
meanx = np.arange(i1,i2)[m].mean() + group[0]
ii1 = i1 + group[0]
ii2 = i2 + group[0]
if plot_section and x in plot_cols:
ax1.plot(np.arange(ii1, ii2), xsection[ii1:ii2], ls='-',
color='C3', lw=0.8, alpha=0.8)
if m.sum() < m.size:
ax1.plot(np.arange(ii1, ii2)[~m], xsection[ii1:ii2][~m],
'o', color='gray', ms=3, lw=1, alpha=0.5)
fitx_lst.append(meanx)
fity_lst.append(mean)
fityerr_lst.append(std)
#print('%4d %4d %10.6f %10.6f %10.6f'%(
# group[0], group[-1], mean, std, std/math.sqrt(abs(mean))))
fitx_lst = np.array(fitx_lst)
fity_lst = np.array(fity_lst)
fityerr_lst = np.array(fityerr_lst)
maxiter = 5
mask = fity_lst > 0
for ite in range(maxiter):
coeff = np.polyfit(fitx_lst[mask]/h, np.log(fity_lst[mask]), deg=yorder)
yres = np.log(fity_lst) - np.polyval(coeff, fitx_lst/h)
std = yres[mask].std()
allflux = np.polyval(coeff, ally/h)
allflux = np.exp(allflux)
new_mask = (yres < 2.*std)*(yres > -5.*std)
if new_mask.sum()==mask.sum():
break
mask = new_mask
grid.append(allflux)
if plot_section and x in plot_cols:
# plot fitx and fity with errorbars
ax1.errorbar(fitx_lst[~mask], fity_lst[~mask], yerr=fityerr_lst[~mask],
fmt='o', mfc='w', mec='C2', ms=3, mew=1,
ecolor='C2', elinewidth=0.8, alpha=0.8)
ax1.errorbar(fitx_lst[mask], fity_lst[mask], yerr=fityerr_lst[mask],
fmt='o', mfc='C2', mec='C2', ms=3, mew=1,
ecolor='C2', elinewidth=0.8, alpha=0.8)
ax1.plot(ally, allflux, '-', color='C2', lw=0.7)
_ymin, _ymax = fity_lst[mask].min(), fity_lst[mask].max()
_y1, _y2 = 1.2*_ymin-0.2*_ymax, 1.2*_ymax-0.2*_ymin
ax1.set_ylim(_y1, _y2)
ax1.set_xlim(0, h-1)
ax1.text(0.03*h, 0.8*_y1+0.2*_y2, 'x=%d'%x,
fontsize=label_size, alpha=0.8)
for tick in ax1.xaxis.get_major_ticks():
tick.label1.set_fontsize(tick_size)
for tick in ax1.yaxis.get_major_ticks():
tick.label1.set_fontsize(tick_size)
#for tick in ax2.yaxis.get_major_ticks():
# tick.label2.set_fontsize(tick_size)
# tick.label2.set_color('C0')
#for tickline in ax2.yaxis.get_ticklines():
# tickline.set_color('C0')
if i < 4:
ax1.set_xticklabels([])
else:
ax1.set_xlabel('Y', fontsize=label_size)
ax1.set_ylabel('Flux', fontsize=label_size)
#ax2.set_ylabel('Aperture Number', fontsize=label_size, color='C0')
if plot_section:
fig1.savefig(fig_section)
plt.close(fig1)
grid = np.array(grid)
stray = np.zeros_like(data, dtype=data.dtype)
for y in np.arange(h):
f = intp.InterpolatedUnivariateSpline(cols, grid[:,y], k=3)
stray[y,:] = f(np.arange(w))
return stray
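# Illustrative call (the parameter values below are assumptions for this
# sketch, not defaults taken from any particular pipeline):
#     stray = find_background(data, mask, aperset, ncols=9, distance=7,
#                             yorder=7, fig_section='bkg_section.png')
#     corrected = data - stray
# where aperset is either a single ApertureSet or a dict of ApertureSets
# keyed by fiber/channel name.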
def simple_debackground(data, mask, xnodes, smooth=20, maxiter=10, deg=3):
"""
"""
ny, nx = data.shape
allx = np.arange(ny)
if smooth is not None:
core = np.hanning(smooth)
core = core/core.sum()
# prepare interpolation grid
grid = []
for x in xnodes:
section = data[:, x]
sect_mask = mask[:, x]>0
if sect_mask.sum() > 0:
f = intp.InterpolatedUnivariateSpline(
allx[~sect_mask], section[~sect_mask], k=3, ext=3)
section = f(allx)
if smooth is not None:
section_new = np.convolve(section, core, mode='same')
else:
section_new = section
allimin, allmin = get_local_minima(section_new)
# remove the first and last local minima
m = (allimin>0) * (allimin<ny-1)
allimin = allimin[m]
allmin = allmin[m]
#allimin, allmin = get_local_minima(section)
#mask = np.ones_like(allmin, dtype=np.bool)
fitmask = (allmin > 0)*(sect_mask[allimin]==0)
for i in range(maxiter):
coeff = np.polyfit(allimin[fitmask]/ny, np.log(allmin[fitmask]),
deg=deg)
#res_lst = allmin - np.exp(np.polyval(coeff, allimin/ny))
res_lst = np.log(allmin) - np.polyval(coeff, allimin/ny)
std = res_lst[fitmask].std()
mask1 = res_lst < 3*std
mask2 = res_lst > -3*std
new_fitmask = mask1*mask2
if new_fitmask.sum() == fitmask.sum():
break
else:
fitmask = fitmask*new_fitmask
logbkg = np.polyval(coeff, allx/ny)
linbkg = np.exp(logbkg)
######################## plot #####################
#figname = 'bkg-b-%04d.png'%x
figname = None
if figname is not None:
fig = plt.figure(dpi=150, figsize=(10,8))
ax = fig.gca()
ax.plot(allx, linbkg, color='C0')
#ax.plot(allx, linbkg+std, color='C0', ls='--')
#ax.plot(allx, linbkg-std, color='C0', ls='--')
ax.plot(allx, section, color='C1', lw=0.5)
if smooth is not None:
ax.plot(allx, section_new, color='C2', lw=0.5)
ax.scatter(allimin, allmin, c='C3', s=10)
ax.set_yscale('log')
plt.savefig(figname)
plt.close(fig)
###################################################
logbkg = np.polyval(coeff, allx/ny)
grid.append(logbkg)
# interpolate the whole image
grid = np.array(grid)
stray = np.zeros_like(data, dtype=data.dtype)
for y in np.arange(ny):
f = intp.InterpolatedUnivariateSpline(xnodes, grid[:,y], k=3)
stray[y, :] = f(np.arange(nx))
pmask = data>0
corrected_data = np.zeros_like(data)
corrected_data[pmask] = np.log(data[pmask]) - stray[pmask]
return np.exp(corrected_data)
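# Illustrative call (the column sampling step is an assumption of this
# sketch):
#     xnodes = np.arange(64, nx, 128)
#     cleaned = simple_debackground(data, mask, xnodes, smooth=20, deg=3)
# For positive pixels the returned image is the input divided by the fitted
# smooth background, since the correction is applied in log space.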
def get_interorder_background(data, mask=None, apertureset=None, **kwargs):
"""Get inter-order background light from a given image.
Args:
data (:class:`numpy.ndarray`): Input image.
mask (:class:`numpy.ndarray`): Mask of the input image.
apertureset (:class:`~gamse.echelle.trace.ApertureSet`): Aperture set
of the input image.
"""
figname = kwargs.pop('figname', 'bkg_{:04d}.png')
distance = kwargs.pop('distance', 7)
if mask is None:
mask = np.zeros_like(data, dtype=np.int32)
ny, nx = data.shape
bkg_image = np.zeros_like(data, dtype=np.float32)
allrows = np.arange(ny)
plot_x = []
for x in np.arange(nx):
if x in plot_x:
plot = True
fig1 = plt.figure(figsize=(12,8))
ax01 = fig1.add_subplot(211)
ax02 = fig1.add_subplot(212)
else:
plot = False
mask_rows = np.zeros_like(allrows, dtype=bool)
for aper, aperloc in sorted(apertureset.items()):
ycen = aperloc.position(x)
if plot:
ax01.axvline(x=ycen, color='C0', ls='--', lw=0.5, alpha=0.4)
ax02.axvline(x=ycen, color='C0', ls='--', lw=0.5, alpha=0.4)
imask = np.abs(allrows - ycen) < distance
mask_rows += imask
if plot:
ax01.plot(allrows, data[:, x], color='C0', alpha=0.3, lw=0.7)
x_lst, y_lst = [], []
for (y1, y2) in get_edge_bin(~mask_rows):
if plot:
ax01.plot(allrows[y1:y2], data[y1:y2,x],
color='C0', alpha=1, lw=0.7)
ax02.plot(allrows[y1:y2], data[y1:y2,x],
color='C0', alpha=1, lw=0.7)
if y2-y1>1:
yflux = data[y1:y2, x]
ymask = mask[y1:y2, x]
xlist = np.arange(y1, y2)
# block the highest point and calculate mean
_m = xlist == y1 + np.argmax(yflux)
mean = yflux[~_m].mean()
std = yflux[~_m].std()
if yflux.max() < mean + 3.*std:
meany = yflux.mean()
meanx = (y1+y2-1)/2
else:
meanx = xlist[~_m].mean()
meany = mean
else:
meany = data[y1,x]
meanx = y1
x_lst.append(meanx)
y_lst.append(meany)
x_lst = np.array(x_lst)
y_lst = np.array(y_lst)
y_lst = np.maximum(y_lst, 0)
y_lst = sg.medfilt(y_lst, 3)
f = intp.InterpolatedUnivariateSpline(x_lst, y_lst, k=3, ext=3)
bkg = f(allrows)
bkg_image[:, x] = bkg
if plot:
ax01.plot(x_lst, y_lst, 'o', color='C3', ms=3)
ax02.plot(x_lst, y_lst, 'o', color='C3', ms=3)
ax01.plot(allrows, bkg, ls='-', color='C3', lw=0.7, alpha=1)
ax02.plot(allrows, bkg, ls='-', color='C3', lw=0.7, alpha=1)
_y1, _y2 = ax02.get_ylim()
ax02.plot(allrows, data[:, x], color='C0', alpha=0.3, lw=0.7)
ax02.set_ylim(_y1, _y2)
ax01.set_xlim(0, ny-1)
ax02.set_xlim(0, ny-1)
fig1.savefig(figname.format(x))
plt.close(fig1)
return bkg_image
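# Illustrative call: everything within `distance` pixels of each order
# center is masked, and the remaining inter-order points are interpolated
# column by column (figname/plot_x only control optional diagnostic plots):
#     bkg = get_interorder_background(data, mask=sat_mask,
#                                     apertureset=aperset, distance=7)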
def get_xdisp_profile(data, apertureset):
"""Get brightness profile along the cross-dispersion direction.
Args:
data (:class:`numpy.ndarray`): Input image.
apertureset (:class:`~gamse.echelle.trace.ApertureSet`): Aperture set
of the input image.
Returns:
tuple: A tuple containing:
* list of aperture numbers
* list of aperture positions
* list of aperture brightness
"""
# get order brightness profile
ny, nx = data.shape
yy, xx = np.mgrid[:ny:, :nx:]
aper_num_lst, aper_brt_lst, aper_pos_lst = [], [], []
x_lst = np.arange(nx)
for aper, aperloc in sorted(apertureset.items()):
ycen_lst = aperloc.position(x_lst)
m1 = yy > ycen_lst - 1
m2 = yy < ycen_lst + 2
mask_image = m1*m2
maxflux_lst = (data*mask_image).max(axis=0)
# maxflux is a spectrum but with maximum values in each pixel
brightness = np.percentile(maxflux_lst, 99)
aper_num_lst.append(aper)
aper_brt_lst.append(brightness)
aper_pos_lst.append(aperloc.position(nx//2))
aper_num_lst = np.array(aper_num_lst)
aper_brt_lst = np.array(aper_brt_lst)
aper_pos_lst = np.array(aper_pos_lst)
return aper_num_lst, aper_pos_lst, aper_brt_lst
def find_profile_scale(input_profile, ref_profile):
"""Find the scaling factor of two brightness profiles.
"""
fitfunc = lambda s: ref_profile*s
errfunc = lambda s: input_profile - fitfunc(s)
s0 = np.median(input_profile)/np.median(ref_profile)
fitres = opt.least_squares(errfunc, s0)
s = fitres.x[0]
return s
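# For a single multiplicative factor, the least-squares problem above has
# the closed form s = sum(input*ref) / sum(ref**2); the iterative solver is
# kept for symmetry with the other fits. Quick illustrative check:
#     import numpy as np
#     ref = np.array([1.0, 2.0, 3.0])
#     assert np.isclose(find_profile_scale(2.5*ref, ref), 2.5)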
class BackgroundLight(object):
def __init__(self, info=None, header=None, data=None, aper_num_lst=None,
aper_ord_lst=None, aper_pos_lst=None, aper_brt_lst=None,
aper_wav_lst=None):
"""
"""
self.info = info
self.header = header
self.data = data
self.aper_num_lst = aper_num_lst
self.aper_ord_lst = aper_ord_lst
self.aper_pos_lst = aper_pos_lst
self.aper_brt_lst = aper_brt_lst
self.aper_wav_lst = aper_wav_lst
def get_wavelength(self, aperture=None, order=None):
"""Get wavelength of a specific aperture or order.
Args:
aperture (int): Aperture number.
order (int): Order number.
Returns:
*float*: wavelength of the specific aperture or order.
"""
if aperture is not None and order is None:
# aperture is given and order is NOT given
for i, aper in enumerate(self.aper_num_lst):
if aper == aperture:
return self.aper_wav_lst[i]
print('Error: Aperture {} does not exist'.format(aperture))
raise ValueError
elif order is not None and aperture is None:
# order is given and aperture is NOT given
for i, o in enumerate(self.aper_ord_lst):
if o == order:
return self.aper_wav_lst[i]
print('Error: Order {} does not exist'.format(order))
raise ValueError
else:
raise ValueError
def get_brightness(self, aperture=None, order=None):
"""Get brightness of a specific aperture or order.
Args:
aperture (int): Aperture number.
order (int): Order number.
Returns:
*float*: brightness of the specific aperture or order.
"""
if aperture is not None and order is None:
# aperture is given and order is NOT given
for i, aper in enumerate(self.aper_num_lst):
if aper == aperture:
return self.aper_brt_lst[i]
print('Error: Aperture {} does not exist'.format(aperture))
raise ValueError
elif order is not None and aperture is None:
# order is given and aperture is NOT given
for i, o in enumerate(self.aper_ord_lst):
if o == order:
return self.aper_brt_lst[i]
print('Error: Order {} does not exist'.format(order))
raise ValueError
else:
raise ValueError
def get_position(self, aperture=None, order=None):
"""Get position of a specific aperture or order.
Args:
aperture (int): Aperture number.
order (int): Order number.
Returns:
*float*: position of the specific aperture or order.
"""
if aperture is not None and order is None:
# aperture is given and order is NOT given
for i, aper in enumerate(self.aper_num_lst):
if aper == aperture:
return self.aper_pos_lst[i]
print('Error: Aperture {} does not exist'.format(aperture))
raise ValueError
elif order is not None and aperture is None:
# order is given and aperture is NOT given
for i, o in enumerate(self.aper_ord_lst):
if o == order:
return self.aper_pos_lst[i]
print('Error: Order {} does not exist'.format(order))
raise ValueError
else:
raise ValueError
def savefits(self, filename):
"""Save this object to FITS file.
Args:
filename (str):
"""
prefix = 'HIERARCH GAMSE BACKGROUNDLIGHT '
self.header.append((prefix + 'FILEID', self.info['fileid']))
self.header.append((prefix + 'FIBER', self.info['fiber']))
self.header.append((prefix + 'OBJECT', self.info['object']))
#self.header.append((prefix + 'OBJTYPE', self.info['objtype']))
self.header.append((prefix + 'EXPTIME', self.info['exptime']))
self.header.append((prefix + 'DATE-OBS', self.info['date-obs']))
for aper, order, pos, brt, wav in zip(self.aper_num_lst,
self.aper_ord_lst,
self.aper_pos_lst,
self.aper_brt_lst,
self.aper_wav_lst,
):
prefix2 = prefix + 'APERTURE {} '.format(aper)
self.header.append((prefix2 + 'ORDER', order))
self.header.append((prefix2 + 'POSITION', pos))
self.header.append((prefix2 + 'BRIGHTNESS', brt))
self.header.append((prefix2 + 'WAVELENGTH', wav))
fits.writeto(filename, self.data, self.header, overwrite=True)
@staticmethod
def read(filename):
data, head = fits.getdata(filename, header=True)
prefix = 'GAMSE BACKGROUNDLIGHT '
info = {'fileid': head[prefix + 'FILEID'],
'fiber': head[prefix + 'FIBER'],
'object': head[prefix + 'OBJECT'],
#'objtype': head[prefix + 'OBJTYPE'],
'exptime': head[prefix + 'EXPTIME'],
'date-obs': head[prefix + 'DATE-OBS'],
}
aper_num_lst = []
aper_ord_lst = []
aper_pos_lst = []
aper_brt_lst = []
aper_wav_lst = []
for key, value in head.items():
m = re.match(r'^GAMSE BACKGROUNDLIGHT APERTURE (\d+) ORDER', key)
if m:
aper = int(m.group(1))
aper_num_lst.append(aper)
aper_ord_lst.append(value)
continue
m = re.match(r'^GAMSE BACKGROUNDLIGHT APERTURE (\d+) POSITION', key)
if m:
aper_pos_lst.append(value)
continue
m = re.match(r'^GAMSE BACKGROUNDLIGHT APERTURE (\d+) BRIGHTNESS', key)
if m:
aper_brt_lst.append(value)
continue
m = re.match(r'^GAMSE BACKGROUNDLIGHT APERTURE (\d+) WAVELENGTH', key)
if m:
aper_wav_lst.append(value)
continue
bkg_obj = BackgroundLight(
info = info,
header = head,
data = data,
aper_num_lst = np.array(aper_num_lst),
aper_ord_lst = np.array(aper_ord_lst),
aper_pos_lst = np.array(aper_pos_lst),
aper_brt_lst = np.array(aper_brt_lst),
aper_wav_lst = np.array(aper_wav_lst),
)
return bkg_obj
def find_xdisp_shift(self, bkg_obj):
"""Find the relative shift between this and the input background light
object.
Args:
bkg_obj ():
Returns:
*float*: Relative shift in pixel along the cross-dispersion
direction.
"""
common_ord_lst = [order for order in self.aper_ord_lst
if order in bkg_obj.aper_ord_lst]
pixel_shift_lst = [self.get_position(order=o)
- bkg_obj.get_position(order=o)
for o in common_ord_lst]
pixel_shift_lst = np.array(pixel_shift_lst)
return np.median(pixel_shift_lst)
def find_brightness_scale(self, bkg_obj):
"""Find the scale factor of the brightness between this and the input
background light object.
Args:
bkg_obj ():
Returns:
*float*: Scale factor of brightness.
"""
common_ord_lst = [order for order in self.aper_ord_lst
if order in bkg_obj.aper_ord_lst]
brt_lst1, brt_lst2 = [], []
for order in common_ord_lst:
brt1 = self.get_brightness(order=order)
brt2 = bkg_obj.get_brightness(order=order)
if brt1>0 and brt2>0:
brt_lst1.append(brt1)
brt_lst2.append(brt2)
fitfunc = lambda s: brt_lst2*s
errfunc = lambda s: brt_lst1 - fitfunc(s)
s0 = np.median(brt_lst1)/np.median(brt_lst2)
fitres = opt.least_squares(errfunc, s0)
s = fitres.x[0]
return s
class BackgroundFigureCommon(Figure):
"""Figure to plot the background correction.
"""
def __init__(self, *args, **kwargs):
Figure.__init__(self, *args, **kwargs)
self.canvas = FigureCanvasAgg(self)
def close(self):
plt.close(self)
def find_best_background(background_lst, background, fiber, objname, time,
objtype):
"""Find the best pre-saved background light from a list of backgrounds.
Args:
background_lst (list):
background ():
fiber (str):
objname (str):
time ():
objtype (str):
"""
if objname.lower() in ['comb', 'fp', 'thar']:
candidate_lst = []
shift_lst = []
scale_lst = []
# first round, search for the SAME object in the SAME fiber
for bkg_obj in background_lst:
if bkg_obj.info['object'].lower() == objname.lower() \
and bkg_obj.info['fiber'] == fiber:
shift = background.find_xdisp_shift(bkg_obj)
scale = background.find_brightness_scale(bkg_obj)
shift_lst.append(shift)
scale_lst.append(scale)
candidate_lst.append(bkg_obj)
if len(candidate_lst)>0:
index = np.array(scale_lst).argmin()
# the minimum scale guarantees that the template has the best SNR
return candidate_lst[index]
# second round, search for the SAME object but in all possible fibers.
for bkg_obj in background_lst:
if bkg_obj.info['object'].lower() == objname.lower():
shift = background.find_xdisp_shift(bkg_obj)
scale = background.find_brightness_scale(bkg_obj)
shift_lst.append(shift)
scale_lst.append(scale)
candidate_lst.append(bkg_obj)
if len(candidate_lst)>0:
index = np.array(scale_lst).argmin()
# the minimum scale guarantees that the template has the best SNR
return candidate_lst[index]
return None
elif objtype == 'star':
candidate_lst = []
scale_lst = []
# first round, search for the SAME object in the SAME fiber
for bkg_obj in background_lst:
if bkg_obj.info['object'].lower() == objname.lower() \
and bkg_obj.info['fiber'] == fiber:
scale = background.find_brightness_scale(bkg_obj)
scale_lst.append(scale)
candidate_lst.append(bkg_obj)
if len(candidate_lst)>0:
index = np.array(scale_lst).argmin()
return candidate_lst[index]
# second round, search for objects in the SAME fiber
for bkg_obj in background_lst:
if bkg_obj.info.get('objtype') == objtype \
and bkg_obj.info['fiber'] == fiber:
scale = background.find_brightness_scale(bkg_obj)
scale_lst.append(scale)
candidate_lst.append(bkg_obj)
if len(candidate_lst)>0:
index = np.array(scale_lst).argmin()
return candidate_lst[index]
return None
else:
print('Warning: Unknown object type:', objtype)
return None
def select_background_from_database(path, **args):
# find the index file
shape = args.pop('shape')
fiber = args.pop('fiber')
direction = args.pop('direction')
objtype = args.pop('objtype', None)
obj = args.pop('obj', None)
spectype = args.pop('spectype', None)
logger.info('objtype={}, obj={}, spectype={}'.format(objtype, obj, spectype))
filename = os.path.join(path, 'index.dat')
table = Table.read(filename, format='ascii.fixed_width_two_line')
# first round
mask = table['objtype']==objtype
table = table[mask]
logger.info('mask={}'.format(mask))
if obj.lower().strip() == 'comb':
mask = table['object']=='comb'
table = table[mask]
m1 = table['shape']==str(shape)[1:-1]
m2 = table['fiber']==fiber
m3 = table['direction']==direction
score = np.int32(m1) + np.int32(m2) + np.int32(m3)
logger.debug('score={}'.format(score))
mask = score == score.max()
logger.debug('mask={}'.format(mask))
table = table[mask]
row = table[0]
logger.debug('selected {} (obj={}, fiber={})'.format(
row['fileid'], row['object'], row['fiber']))
elif objtype == 'star':
mask = []
for row in table:
if row['object'].lower()==obj:
mask.append(True)
else:
mask.append(False)
if sum(mask)>0:
table = table[mask]
row = table[0]
else:
row = table[0]
logger.debug('selected {} (obj={}, fiber={})'.format(
row['fileid'], row['object'], row['fiber']))
else:
return None
selected_fileid = row['fileid']
filename = os.path.join(path, 'bkg_{}.fits'.format(selected_fileid))
return BackgroundLight.read(filename)
|
the-stack_106_29484 | import inspect
import json
import logging
import urllib.error
from datetime import date, timedelta
from decimal import ROUND_HALF_UP, Decimal
import vat_moss.exchange_rates
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.serializers.json import DjangoJSONEncoder
from django.db import transaction
from django.db.models import Count
from django.dispatch import receiver
from django.utils import timezone
from django.utils.timezone import now
from django.utils.translation import pgettext, ugettext as _
from django_countries.fields import Country
from django_scopes import scope, scopes_disabled
from i18nfield.strings import LazyI18nString
from pretix.base.i18n import language
from pretix.base.models import (
Invoice, InvoiceAddress, InvoiceLine, Order, OrderPayment,
)
from pretix.base.models.tax import EU_CURRENCIES
from pretix.base.services.tasks import TransactionAwareTask
from pretix.base.settings import GlobalSettingsObject
from pretix.base.signals import periodic_task
from pretix.celery_app import app
from pretix.helpers.database import rolledback_transaction
from pretix.helpers.models import modelcopy
logger = logging.getLogger(__name__)
@transaction.atomic
def build_invoice(invoice: Invoice) -> Invoice:
lp = invoice.order.payments.last()
open_payment = None
if lp and lp.state not in (OrderPayment.PAYMENT_STATE_CONFIRMED, OrderPayment.PAYMENT_STATE_REFUNDED):
open_payment = lp
with language(invoice.locale):
invoice.invoice_from = invoice.event.settings.get('invoice_address_from')
invoice.invoice_from_name = invoice.event.settings.get('invoice_address_from_name')
invoice.invoice_from_zipcode = invoice.event.settings.get('invoice_address_from_zipcode')
invoice.invoice_from_city = invoice.event.settings.get('invoice_address_from_city')
invoice.invoice_from_country = invoice.event.settings.get('invoice_address_from_country')
invoice.invoice_from_tax_id = invoice.event.settings.get('invoice_address_from_tax_id')
invoice.invoice_from_vat_id = invoice.event.settings.get('invoice_address_from_vat_id')
introductory = invoice.event.settings.get('invoice_introductory_text', as_type=LazyI18nString)
additional = invoice.event.settings.get('invoice_additional_text', as_type=LazyI18nString)
footer = invoice.event.settings.get('invoice_footer_text', as_type=LazyI18nString)
if open_payment and open_payment.payment_provider:
if 'payment' in inspect.signature(open_payment.payment_provider.render_invoice_text).parameters:
payment = open_payment.payment_provider.render_invoice_text(invoice.order, open_payment)
else:
payment = open_payment.payment_provider.render_invoice_text(invoice.order)
elif invoice.order.status == Order.STATUS_PAID:
payment = pgettext('invoice', 'The payment for this invoice has already been received.')
else:
payment = ""
invoice.introductory_text = str(introductory).replace('\n', '<br />')
invoice.additional_text = str(additional).replace('\n', '<br />')
invoice.footer_text = str(footer)
invoice.payment_provider_text = str(payment).replace('\n', '<br />')
try:
ia = invoice.order.invoice_address
addr_template = pgettext("invoice", """{i.company}
{i.name}
{i.street}
{i.zipcode} {i.city} {state}
{country}""")
invoice.invoice_to = "\n".join(
a.strip() for a in addr_template.format(
i=ia,
country=ia.country.name if ia.country else ia.country_old,
state=ia.state_for_address
).split("\n") if a.strip()
)
invoice.internal_reference = ia.internal_reference
invoice.invoice_to_company = ia.company
invoice.invoice_to_name = ia.name
invoice.invoice_to_street = ia.street
invoice.invoice_to_zipcode = ia.zipcode
invoice.invoice_to_city = ia.city
invoice.invoice_to_country = ia.country
invoice.invoice_to_state = ia.state
invoice.invoice_to_beneficiary = ia.beneficiary
if ia.vat_id:
invoice.invoice_to += "\n" + pgettext("invoice", "VAT-ID: %s") % ia.vat_id
invoice.invoice_to_vat_id = ia.vat_id
cc = str(ia.country)
if cc in EU_CURRENCIES and EU_CURRENCIES[cc] != invoice.event.currency:
invoice.foreign_currency_display = EU_CURRENCIES[cc]
if settings.FETCH_ECB_RATES:
gs = GlobalSettingsObject()
rates_date = gs.settings.get('ecb_rates_date', as_type=date)
rates_dict = gs.settings.get('ecb_rates_dict', as_type=dict)
convert = (
rates_date and rates_dict and
rates_date > (now() - timedelta(days=7)).date() and
invoice.event.currency in rates_dict and
invoice.foreign_currency_display in rates_dict
)
if convert:
invoice.foreign_currency_rate = (
Decimal(rates_dict[invoice.foreign_currency_display])
/ Decimal(rates_dict[invoice.event.currency])
).quantize(Decimal('0.0001'), ROUND_HALF_UP)
invoice.foreign_currency_rate_date = rates_date
except InvoiceAddress.DoesNotExist:
ia = None
invoice.invoice_to = ""
invoice.file = None
invoice.save()
invoice.lines.all().delete()
positions = list(
invoice.order.positions.select_related('addon_to', 'item', 'tax_rule', 'subevent', 'variation').annotate(
addon_c=Count('addons')
).prefetch_related('answers', 'answers__question').order_by('positionid', 'id')
)
reverse_charge = False
positions.sort(key=lambda p: p.sort_key)
for i, p in enumerate(positions):
if not invoice.event.settings.invoice_include_free and p.price == Decimal('0.00') and not p.addon_c:
continue
desc = str(p.item.name)
if p.variation:
desc += " - " + str(p.variation.value)
if p.addon_to_id:
desc = " + " + desc
if invoice.event.settings.invoice_attendee_name and p.attendee_name:
desc += "<br />" + pgettext("invoice", "Attendee: {name}").format(name=p.attendee_name)
for answ in p.answers.all():
if not answ.question.print_on_invoice:
continue
desc += "<br />{}{} {}".format(
answ.question.question,
"" if str(answ.question.question).endswith("?") else ":",
str(answ)
)
if invoice.event.has_subevents:
desc += "<br />" + pgettext("subevent", "Date: {}").format(p.subevent)
InvoiceLine.objects.create(
position=i, invoice=invoice, description=desc,
gross_value=p.price, tax_value=p.tax_value,
subevent=p.subevent, event_date_from=(p.subevent.date_from if p.subevent else invoice.event.date_from),
tax_rate=p.tax_rate, tax_name=p.tax_rule.name if p.tax_rule else ''
)
if p.tax_rule and p.tax_rule.is_reverse_charge(ia) and p.price and not p.tax_value:
reverse_charge = True
if reverse_charge:
if invoice.additional_text:
invoice.additional_text += "<br /><br />"
invoice.additional_text += pgettext(
"invoice",
"Reverse Charge: According to Article 194, 196 of Council Directive 2006/112/EEC, VAT liability "
"rests with the service recipient."
)
invoice.reverse_charge = True
invoice.save()
offset = len(positions)
for i, fee in enumerate(invoice.order.fees.all()):
fee_title = _(fee.get_fee_type_display())
if fee.description:
fee_title += " - " + fee.description
InvoiceLine.objects.create(
position=i + offset,
invoice=invoice,
description=fee_title,
gross_value=fee.value,
tax_value=fee.tax_value,
tax_rate=fee.tax_rate,
tax_name=fee.tax_rule.name if fee.tax_rule else ''
)
return invoice
def build_cancellation(invoice: Invoice):
invoice.lines.all().delete()
for line in invoice.refers.lines.all():
line.pk = None
line.invoice = invoice
line.gross_value *= -1
line.tax_value *= -1
line.save()
return invoice
def generate_cancellation(invoice: Invoice, trigger_pdf=True):
if invoice.refered.exists():
raise ValueError("Invoice should not be canceled twice.")
cancellation = modelcopy(invoice)
cancellation.pk = None
cancellation.invoice_no = None
cancellation.prefix = None
cancellation.refers = invoice
cancellation.is_cancellation = True
cancellation.date = timezone.now().date()
cancellation.payment_provider_text = ''
cancellation.file = None
cancellation.save()
cancellation = build_cancellation(cancellation)
if trigger_pdf:
invoice_pdf(cancellation.pk)
return cancellation
def regenerate_invoice(invoice: Invoice):
if invoice.shredded:
return invoice
if invoice.is_cancellation:
invoice = build_cancellation(invoice)
else:
invoice = build_invoice(invoice)
invoice_pdf(invoice.pk)
return invoice
def generate_invoice(order: Order, trigger_pdf=True):
locale = order.event.settings.get('invoice_language', order.event.settings.locale)
if locale:
if locale == '__user__':
locale = order.locale or order.event.settings.locale
invoice = Invoice(
order=order,
event=order.event,
organizer=order.event.organizer,
date=timezone.now().date(),
locale=locale
)
invoice = build_invoice(invoice)
if trigger_pdf:
invoice_pdf(invoice.pk)
if order.status == Order.STATUS_CANCELED:
generate_cancellation(invoice, trigger_pdf)
return invoice
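# Illustrative call site (a sketch; ``order`` stands for an existing pretix Order instance):
#   invoice = generate_invoice(order)    # builds the invoice and queues PDF rendering
#   generate_cancellation(invoice)       # later, if the invoice needs to be voided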
@app.task(base=TransactionAwareTask)
def invoice_pdf_task(invoice: int):
with scopes_disabled():
i = Invoice.objects.get(pk=invoice)
with scope(organizer=i.order.event.organizer):
if i.shredded:
return None
if i.file:
i.file.delete()
with language(i.locale):
fname, ftype, fcontent = i.event.invoice_renderer.generate(i)
i.file.save(fname, ContentFile(fcontent))
i.save()
return i.file.name
def invoice_qualified(order: Order):
if order.total == Decimal('0.00') or order.require_approval or \
order.sales_channel not in order.event.settings.get('invoice_generate_sales_channels'):
return False
return True
def invoice_pdf(*args, **kwargs):
    # We call this task asynchronously, because otherwise we run into race conditions where
# the task worker tries to generate the PDF even before our database transaction
# was committed and therefore fails to find the invoice object. The invoice_pdf_task
# will prevent this kind of race condition.
invoice_pdf_task.apply_async(args=args, kwargs=kwargs)
class DummyRollbackException(Exception):
pass
def build_preview_invoice_pdf(event):
locale = event.settings.invoice_language
if not locale or locale == '__user__':
locale = event.settings.locale
with rolledback_transaction(), language(locale):
order = event.orders.create(status=Order.STATUS_PENDING, datetime=timezone.now(),
expires=timezone.now(), code="PREVIEW", total=119)
invoice = Invoice(
order=order, event=event, invoice_no="PREVIEW",
date=timezone.now().date(), locale=locale, organizer=event.organizer
)
invoice.invoice_from = event.settings.get('invoice_address_from')
invoice.invoice_from_name = invoice.event.settings.get('invoice_address_from_name')
invoice.invoice_from_zipcode = invoice.event.settings.get('invoice_address_from_zipcode')
invoice.invoice_from_city = invoice.event.settings.get('invoice_address_from_city')
invoice.invoice_from_country = invoice.event.settings.get('invoice_address_from_country')
invoice.invoice_from_tax_id = invoice.event.settings.get('invoice_address_from_tax_id')
invoice.invoice_from_vat_id = invoice.event.settings.get('invoice_address_from_vat_id')
introductory = event.settings.get('invoice_introductory_text', as_type=LazyI18nString)
additional = event.settings.get('invoice_additional_text', as_type=LazyI18nString)
footer = event.settings.get('invoice_footer_text', as_type=LazyI18nString)
payment = _("A payment provider specific text might appear here.")
invoice.introductory_text = str(introductory).replace('\n', '<br />')
invoice.additional_text = str(additional).replace('\n', '<br />')
invoice.footer_text = str(footer)
invoice.payment_provider_text = str(payment).replace('\n', '<br />')
invoice.invoice_to_name = _("John Doe")
invoice.invoice_to_street = _("214th Example Street")
invoice.invoice_to_zipcode = _("012345")
invoice.invoice_to_city = _('Sample city')
invoice.invoice_to_country = Country('DE')
invoice.invoice_to = '{}\n{}\n{} {}'.format(
invoice.invoice_to_name, invoice.invoice_to_street,
invoice.invoice_to_zipcode, invoice.invoice_to_city
)
invoice.invoice_to_beneficiary = ''
invoice.file = None
invoice.save()
invoice.lines.all().delete()
if event.tax_rules.exists():
for i, tr in enumerate(event.tax_rules.all()):
tax = tr.tax(Decimal('100.00'))
InvoiceLine.objects.create(
invoice=invoice, description=_("Sample product {}").format(i + 1),
gross_value=tax.gross, tax_value=tax.tax,
tax_rate=tax.rate
)
else:
InvoiceLine.objects.create(
invoice=invoice, description=_("Sample product A"),
gross_value=100, tax_value=0, tax_rate=0
)
return event.invoice_renderer.generate(invoice)
@receiver(signal=periodic_task)
def fetch_ecb_rates(sender, **kwargs):
if not settings.FETCH_ECB_RATES:
return
gs = GlobalSettingsObject()
if gs.settings.ecb_rates_date == now().strftime("%Y-%m-%d"):
return
try:
date, rates = vat_moss.exchange_rates.fetch()
gs.settings.ecb_rates_date = date
gs.settings.ecb_rates_dict = json.dumps(rates, cls=DjangoJSONEncoder)
except urllib.error.URLError:
logger.exception('Could not retrieve rates from ECB')
|
the-stack_106_29487 | import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2C11_cff import Phase2C11
process = cms.Process("HGCalParametersTest",Phase2C11)
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
#process.load("Geometry.CMSCommonData.cmsExtendedGeometry2026D71XML_cfi")
process.load("Geometry.HGCalCommonData.testHGCalV14XML_cfi")
#process.load("Geometry.HGCalCommonData.testHGCXML_cfi")
process.load("Geometry.HGCalCommonData.hgcalParametersInitialization_cfi")
process.load('FWCore.MessageService.MessageLogger_cfi')
if hasattr(process,'MessageLogger'):
process.MessageLogger.HGCalGeom=dict()
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(14),
MinEta = cms.double(-3.5),
MaxEta = cms.double(3.5),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinE = cms.double(9.99),
MaxE = cms.double(10.01)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1)
)
process.testEE = cms.EDAnalyzer("HGCalParameterTester",
Name = cms.untracked.string("HGCalEESensitive"),
Mode = cms.untracked.int32(1)
# Mode = cms.untracked.int32(0)
)
process.testHESil = process.testEE.clone(
Name = cms.untracked.string("HGCalHESiliconSensitive")
)
process.testHESci = process.testEE.clone(
Name = cms.untracked.string("HGCalHEScintillatorSensitive"),
Mode = cms.untracked.int32(2)
)
process.p1 = cms.Path(process.generator*process.testEE*process.testHESil*process.testHESci)
#process.p1 = cms.Path(process.generator*process.testEE*process.testHESil)
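# To run this configuration with the standard CMSSW workflow (the file name here is illustrative):
#   cmsRun testHGCalParameterTester_cfg.py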
|
the-stack_106_29490 | # Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from google.protobuf.json_format import MessageToJson
from tabulate import tabulate
from bentoml.cli.click_utils import CLI_COLOR_ERROR, _echo, parse_bento_tag_callback
from bentoml.cli.utils import humanfriendly_age_from_datetime
from bentoml.proto import status_pb2
from bentoml.utils import pb_to_yaml, status_pb_to_error_code_and_message
from bentoml.utils.usage_stats import track_cli
from bentoml.yatai.client import YataiClient
def _print_bento_info(bento, output_type):
if output_type == 'yaml':
_echo(pb_to_yaml(bento))
else:
_echo(MessageToJson(bento))
def _print_bento_table(bentos, wide=False):
table = []
if wide:
headers = ['BENTO_SERVICE', 'CREATED_AT', 'APIS', 'ARTIFACTS', 'URI']
else:
headers = ['BENTO_SERVICE', 'AGE', 'APIS', 'ARTIFACTS']
for bento in bentos:
artifacts = [
f'{artifact.name}<{artifact.artifact_type}>'
for artifact in bento.bento_service_metadata.artifacts
]
apis = [
f'{api.name}<{api.handler_type}>'
for api in bento.bento_service_metadata.apis
]
if wide:
created_at = bento.bento_service_metadata.created_at.ToDatetime().strftime(
"%Y-%m-%d %H:%M"
)
else:
created_at = humanfriendly_age_from_datetime(
bento.bento_service_metadata.created_at.ToDatetime()
)
row = [
f'{bento.name}:{bento.version}',
created_at,
', '.join(apis),
', '.join(artifacts),
]
if wide:
row.append(bento.uri.uri)
table.append(row)
table_display = tabulate(table, headers, tablefmt='plain')
_echo(table_display)
def _print_bentos_info(bentos, output_type):
if output_type == 'table':
_print_bento_table(bentos)
elif output_type == 'wide':
_print_bento_table(bentos, wide=True)
else:
for bento in bentos:
_print_bento_info(bento, output_type)
def add_bento_sub_command(cli):
# pylint: disable=unused-variable
@cli.command(help='Get BentoService information')
@click.argument('bento', type=click.STRING)
@click.option(
'--limit', type=click.INT, help='Limit how many resources will be retrieved'
)
@click.option('--ascending-order', is_flag=True)
@click.option(
'-o', '--output', type=click.Choice(['json', 'yaml', 'table', 'wide'])
)
def get(bento, limit, ascending_order, output):
if ':' in bento:
name, version = bento.split(':')
else:
name = bento
version = None
yatai_client = YataiClient()
if name and version:
track_cli('bento-get')
output = output or 'json'
get_bento_result = yatai_client.repository.get(name, version)
if get_bento_result.status.status_code != status_pb2.Status.OK:
error_code, error_message = status_pb_to_error_code_and_message(
get_bento_result.status
)
_echo(
f'BentoService {name}:{version} not found - '
f'{error_code}:{error_message}',
CLI_COLOR_ERROR,
)
return
_print_bento_info(get_bento_result.bento, output)
return
elif name:
track_cli('bento-list')
output = output or 'table'
list_bento_versions_result = yatai_client.repository.list(
bento_name=name, limit=limit, ascending_order=ascending_order
)
if list_bento_versions_result.status.status_code != status_pb2.Status.OK:
error_code, error_message = status_pb_to_error_code_and_message(
list_bento_versions_result.status
)
_echo(
f'Failed to list versions for BentoService {name} '
f'{error_code}:{error_message}',
CLI_COLOR_ERROR,
)
return
_print_bentos_info(list_bento_versions_result.bentos, output)
@cli.command(name='list', help='List BentoServices information')
@click.option(
'--limit', type=click.INT, help='Limit how many BentoServices will be retrieved'
)
@click.option(
'--offset', type=click.INT, help='How many BentoServices will be skipped'
)
@click.option(
'--order-by', type=click.Choice(['created_at', 'name']), default='created_at',
)
@click.option('--ascending-order', is_flag=True)
@click.option(
'-o',
'--output',
type=click.Choice(['json', 'yaml', 'table', 'wide']),
default='table',
)
def list_bentos(limit, offset, order_by, ascending_order, output):
yatai_client = YataiClient()
track_cli('bento-list')
list_bentos_result = yatai_client.repository.list(
limit=limit,
offset=offset,
order_by=order_by,
ascending_order=ascending_order,
)
if list_bentos_result.status.status_code != status_pb2.Status.OK:
error_code, error_message = status_pb_to_error_code_and_message(
list_bentos_result.status
)
_echo(
f'Failed to list BentoServices ' f'{error_code}:{error_message}',
CLI_COLOR_ERROR,
)
return
_print_bentos_info(list_bentos_result.bentos, output)
@cli.command()
@click.argument("bento", type=click.STRING, callback=parse_bento_tag_callback)
@click.option(
'-y', '--yes', '--assume-yes', is_flag=True, help='Automatic yes to prompts'
)
def delete(bento, yes):
"""Delete saved BentoService.
BENTO is the target BentoService to be deleted, referenced by its name and
version in format of name:version. For example: "iris_classifier:v1.2.0"
"""
yatai_client = YataiClient()
name, version = bento.split(':')
if not name and not version:
_echo(
'BentoService name or version is missing. Please provide in the '
'format of name:version',
CLI_COLOR_ERROR,
)
return
if not yes and not click.confirm(
f'Are you sure about delete {bento}? This will delete the BentoService '
f'saved bundle files permanently'
):
return
result = yatai_client.repository.dangerously_delete_bento(
name=name, version=version
)
if result.status.status_code != status_pb2.Status.OK:
error_code, error_message = status_pb_to_error_code_and_message(
result.status
)
_echo(
f'Failed to delete Bento {name}:{version} '
f'{error_code}:{error_message}',
CLI_COLOR_ERROR,
)
return
_echo(f'BentoService {name}:{version} deleted')
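# Illustrative CLI usage once these sub-commands are registered (bundle name and version are examples):
#   bentoml get iris_classifier:v1.2.0 -o yaml
#   bentoml list --limit 10
#   bentoml delete iris_classifier:v1.2.0 --yes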
|
the-stack_106_29491 | from django.test import SimpleTestCase
from ..services.chinese import LACChineseTokenizer
class LACChineseTokenizerTests(SimpleTestCase):
def test_prepare_for_segmentation(self):
"""
prepare_for_segmentation() strips carriage returns from input (for
better LAC segmentation)
"""
input_text = "山\r\n月"
self.assertEqual(
LACChineseTokenizer(lang="zho").prepare_for_segmentation(input_text), "山\n月"
)
def test_re_tokenize_split_on_newlines(self):
"""
re_tokenize() splits tokens on new lines (\n)
- lemma tokens containing "\n" are treated as separate lemmas and the
"\n" is kept as a distinct non-lemma token
"""
tokenized_input = [
"哈哈\n\n", # newlines should be split from token but retained in token stream
"\n哈哈", # newline should be split from token but retained in token stream
"哈",
"哈\n哈\n哈", # newlines should be split from token (and retained), causing token to split into three separate lemma tokens
]
self.assertEqual(
list(LACChineseTokenizer(lang="zho").re_tokenize(tokenized_input)),
[
"哈哈",
"\n",
"\n",
"\n",
"哈哈",
"哈",
"哈",
"\n",
"哈",
"\n",
"哈",
],
)
def test_re_tokenize_split_on_pipes(self):
"""
re_tokenize() splits tokens on pipes (|)
- lemma tokens containing "|" are treated as separate lemmas and the
"|" is discarded
"""
tokenized_input = [
"華盛頓|郵報", # should split into two tokens with pipe discarded
"/",
"華盛頓",
"|", # should be removed, as its two neighbors are already split
"郵報",
"|哈|哈|哈|", # multiples should split this into three tokens,
]
self.assertEqual(
list(LACChineseTokenizer(lang="zho").re_tokenize(tokenized_input)),
[
"華盛頓",
"郵報",
"/",
"華盛頓",
"郵報",
"哈",
"哈",
"哈",
],
)
def test_chinese_triples(self):
"""
get_triples() converts single-token stream into stream of triples:
- non-lemma tokens preceding first lemma are added as "following"
to an empty initial token/lemma
- normalized lemma is simply the original lemma (no normalization)
- non-lemma tokens such as punctuation and spacing are added as
"following" to previous lemma
"""
# pre-tokenized text: `“舉頭望山月, 低頭思故鄉。”`
tokenized_input = [
"“", # (Following)
"舉頭", # new token
"望", # new token
"山月", # new token
",", # (Following)
" ", # (Following)
"低頭", # new token
"思", # new token
"故鄉", # new token
"。", # (Following)
"”", # (Following)
]
triples = list(LACChineseTokenizer(lang="zho").get_triples(tokenized_input))
self.assertEqual(
triples,
[
("", "", "“"),
("舉頭", "舉頭", ""),
("望", "望", ""),
("山月", "山月", ", "),
("低頭", "低頭", ""),
("思", "思", ""),
("故鄉", "故鄉", "。”"),
],
)
def test_get_triples_combining_tokens(self):
"""
get_triples() converts single-token stream into stream of triples with
tokens explicitly marked for combining are a single output triple
- prefixed or suffixed "_" characters cause previous or following
tokens to join into a single lemma and the "_" is discarded
"""
# pre-tokenized text: `哈_哈_哈。哈哈哈!`
tokenized_input = [
"哈_哈", # new token
"_", # continuation of previous token
"哈", # continuation of previous token
"。", # (following)
"哈", # new token
"哈_", # new token
"哈", # continuation of previous token
"!", # (following)
]
triples = list(LACChineseTokenizer(lang="zho").get_triples(tokenized_input))
self.assertEqual(
triples,
[
("哈哈哈", "哈哈哈", "。"),
("哈", "哈", ""),
("哈哈", "哈哈", "!"),
],
)
def test_tokenizer(self):
"""
Chinese text strings are properly segmented and typical punctuation and
spaces are rendered as 'following' text.
"""
text_input = "“舉頭望山月, 低頭思故鄉。”"
output = LACChineseTokenizer(lang="zho").tokenize(text_input)
self.assertEqual(
list(output),
[
("", "", "“"),
("舉頭", "舉頭", ""),
("望", "望", ""),
("山月", "山月", ", "),
("低頭", "低頭", ""),
("思", "思", ""),
("故鄉", "故鄉", "。”"),
],
)
def test_tokenizer_latin_characters(self):
"""
Latin characters are considered non-lemma characters
(In practice this means that some mixed-language text will not
be looked up in the lattice / treated as lemmas, such as
3Q ("thank you"), 2019冠狀病毒病 ("COVID-19"))
"""
text_input = "2019冠狀病毒病/3Q/K書"
output = LACChineseTokenizer(lang="zho").tokenize(text_input)
self.assertEqual(
list(output),
[
("", "", "2019"),
("冠狀病毒病", "冠狀病毒病", "/3Q/K"),
("書", "書", ""),
],
)
def test_tokenizer_unicode_normalization(self):
"""
normalize_chinese() normalizes full-width pipe and underscore characters,
and performs some CJK character normalization
"""
lac_tokenizer = LACChineseTokenizer(lang="zho")
# the first character is an alternate form of the second character, but they should not be transformed by NFC
text_input_no_change = "⺼!=月 / ,!=, / 。!=."
self.assertEqual(
lac_tokenizer.normalize_chinese(text_input_no_change),
text_input_no_change,
)
# the first character in each pair is the full-width version, the second is the half-width (ASCII) version
text_input_full_width_punctuation_should_change = "\uFF5C==| / \uFF3F==_"
self.assertEqual(
lac_tokenizer.normalize_chinese(text_input_full_width_punctuation_should_change),
# this is the same string, only with full-width converted to half-width
"|==| / _==_",
)
# the first character is the CJK Compatibility Ideograph \uF9D1, the second is the canonical Unified Ideograph \u516D
text_input_compatibility_change = "六==六"
self.assertEqual(
lac_tokenizer.normalize_chinese(text_input_compatibility_change),
# this is the same string, only with compatibility chars transformed to canonical
"六==六",
)
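# Direct use of the tokenizer under test, mirroring test_tokenizer above (a sketch):
#   LACChineseTokenizer(lang="zho").tokenize("“舉頭望山月, 低頭思故鄉。”")
#   # yields triples such as ("舉頭", "舉頭", "") and ("故鄉", "故鄉", "。”")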
|
the-stack_106_29494 | import logging
import os
from collections import OrderedDict
from typing import Union, TextIO, Optional, Set, List, cast, Dict, Mapping, Tuple, Iterator
from urllib.parse import urlparse
from jsonasobj2 import values
from linkml_runtime.linkml_model.meta import SchemaDefinition, SlotDefinition, SlotDefinitionName, ClassDefinition, \
ClassDefinitionName, TypeDefinitionName, TypeDefinition, ElementName, EnumDefinition, EnumDefinitionName
from linkml_runtime.utils.context_utils import parse_import_map
from linkml_runtime.utils.formatutils import underscore, camelcase, sfx, mangled_attribute_name
from linkml_runtime.utils.metamodelcore import Bool
from linkml_runtime.utils.namespaces import Namespaces
from linkml_runtime.utils.yamlutils import TypedNode
from linkml.utils.mergeutils import merge_schemas, merge_slots, merge_classes, slot_usage_name
from linkml.utils.rawloader import load_raw_schema
from linkml.utils.schemasynopsis import SchemaSynopsis
class SchemaLoader:
def __init__(self,
data: Union[str, TextIO, SchemaDefinition, dict],
base_dir: Optional[str] = None,
namespaces: Optional[Namespaces] = None,
useuris: Optional[bool] = None,
importmap: Optional[Mapping[str, str]] = None,
logger: Optional[logging.Logger] = None,
mergeimports: Optional[bool] = True,
emit_metadata: Optional[bool] = True,
source_file_date: Optional[str] = None,
source_file_size: Optional[int] = None) \
-> None:
""" Constructor - load and process a YAML or pre-processed schema
:param data: YAML schema text, python dict loaded from yaml, URL, file name, open file or SchemaDefinition
:param base_dir: base directory or URL where Schema came from
:param namespaces: namespaces collector
:param useuris: True means class_uri and slot_uri are identifiers. False means they are mappings.
:param importmap: A map from import entries to URI or file name.
:param logger: Target Logger, if any
:param mergeimports: True means combine imports into single package. False means separate packages
:param emit_metadata: True means include source file, size and date
:param source_file_date: modification of source file
:param source_file_size: size of source file
"""
self.logger = logger if logger is not None else logging.getLogger(self.__class__.__name__)
if isinstance(data, SchemaDefinition):
self.schema = data
else:
self.schema = load_raw_schema(data, base_dir=base_dir, merge_modules=mergeimports,
source_file_date=source_file_date,
source_file_size=source_file_size)
# Map from URI to source and version tuple
self.loaded: OrderedDict[str, Tuple[str, str]] = {self.schema.id: (self.schema.source_file, self.schema.version)}
self.base_dir = self._get_base_dir(base_dir)
self.namespaces = namespaces if namespaces else Namespaces()
self.useuris = useuris if useuris is not None else True
self.importmap = parse_import_map(importmap, self.base_dir) if importmap is not None else dict()
self.source_file_date = source_file_date
self.source_file_size = source_file_size
self.synopsis: Optional[SchemaSynopsis] = None
self.schema_location: Optional[str] = None
self.schema_defaults: Dict[str, str] = {} # Map from schema URI to default namespace
self.merge_modules = mergeimports
self.emit_metadata = emit_metadata
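    # Typical usage (illustrative; the schema file name is hypothetical):
    #   loader = SchemaLoader("personinfo.yaml", mergeimports=True)
    #   schema = loader.resolve()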
def resolve(self) -> SchemaDefinition:
"""Reconcile a loaded schema, applying is_a, mixins, apply_to's and other such things. Also validate the
content and load a SchemaSynopsis entry
:return: Fully resolved definition
"""
if not self.schema.default_range:
self.schema.default_range = 'string'
self.logger.info(f"Default_range not specified. Default set to '{self.schema.default_range}'")
# Process the namespace declarations
if not self.schema.default_prefix:
self.schema.default_prefix = sfx(self.schema.id)
self.schema_defaults[self.schema.id] = self.schema.default_prefix
for prefix in self.schema.prefixes.values():
self.namespaces[prefix.prefix_prefix] = prefix.prefix_reference
for cmap in self.schema.default_curi_maps:
self.namespaces.add_prefixmap(cmap, include_defaults=False)
if not self.namespaces._default:
if '://' in self.schema.default_prefix:
self.namespaces._default = self.schema.default_prefix
elif self.schema.default_prefix in self.namespaces:
self.namespaces._default = self.namespaces[self.schema.default_prefix]
else:
self.raise_value_error(f'Default prefix: {self.schema.default_prefix} is not defined',
self.schema.default_prefix)
# Process imports
for imp in self.schema.imports:
sname = self.importmap.get(str(imp), imp) # Import map may use CURIE
sname = self.namespaces.uri_for(sname) if ':' in sname else sname
sname = self.importmap.get(str(sname), sname) # It may also use URI or other forms
import_schemadefinition = \
load_raw_schema(sname + '.yaml',
base_dir=os.path.dirname(self.schema.source_file) if self.schema.source_file else None,
merge_modules=self.merge_modules, emit_metadata=self.emit_metadata)
loaded_schema = (str(sname), import_schemadefinition.version)
if import_schemadefinition.id in self.loaded:
# If we've already loaded this, make sure that we've got the same version
if self.loaded[import_schemadefinition.id][1] != loaded_schema[1]:
self.raise_value_error(f"Schema {import_schemadefinition.name} - version mismatch",
import_schemadefinition.name)
# Note: for debugging purposes we also check whether the version came from the same spot. This should
# be loosened to version only once we're sure that everything is working
# TODO: The test below needs review -- there are cases where it fails because self.loaded[...][0] has the
# full path name and loaded_schema[0] is just the local name
# if self.loaded[import_schemadefinition.id] != loaded_schema:
# self.raise_value_error(f"Schema imported from different files: "
# f"{self.loaded[import_schemadefinition.id][0]} : {loaded_schema[0]}")
else:
self.loaded[import_schemadefinition.id] = loaded_schema
merge_schemas(self.schema, import_schemadefinition, imp, self.namespaces,
merge_imports=self.merge_modules)
self.schema_defaults[import_schemadefinition.id] = import_schemadefinition.default_prefix
self.namespaces._base = self.schema.default_prefix if ':' in self.schema.default_prefix else \
self.namespaces[self.schema.default_prefix]
# Promote embedded attribute definitions to first class slots.
for cls in self.schema.classes.values():
for attribute in cls.attributes.values():
mangled_slot_name = mangled_attribute_name(cls.name, attribute.name)
if mangled_slot_name in self.schema.slots:
self.raise_value_error(f'Class: "{cls.name}" attribute "{attribute.name}" - '
f'mangled name: {mangled_slot_name} already exists', attribute.name)
new_slot = SlotDefinition(**attribute.__dict__)
new_slot.domain_of.append(cls.name)
new_slot.imported_from = cls.imported_from
if not new_slot.alias:
new_slot.alias = attribute.name
new_slot.name = mangled_slot_name
self.schema.slots[new_slot.name] = new_slot
cls.slots.append(mangled_slot_name)
# Assign class slot ownership
for cls in self.schema.classes.values():
if not isinstance(cls, ClassDefinition):
name = cls['name'] if 'name' in cls else 'Unknown'
self.raise_value_error(f'Class "{name} (type: {type(cls)})" definition is not a class definition', name)
if isinstance(cls.slots, str):
self.logger.warning(f"File: {self.schema.source_file} Class: {cls.name} Slots are not an array")
cls.slots = [cls.slots]
for slotname in cls.slots:
if slotname in self.schema.slots:
slot = self.schema.slots[cast(SlotDefinitionName, slotname)]
slot.owner = cls.name
if cls.name not in slot.domain_of:
slot.domain_of.append(cls.name)
else:
self.raise_value_error(f'Class "{cls.name}" - unknown slot: "{slotname}"', slotname)
# Process slots defined as slot usages
self.process_slot_usage_definitions()
# Massage initial set of slots
for slot in self.schema.slots.values():
# Propagate domain to containing class
if slot.domain and slot.domain in self.schema.classes:
if slot.name not in self.schema.classes[slot.domain].slots:
slot.owner = slot.name
# self.schema.classes[slot.domain].slots.append(slot.name)
elif slot.domain:
self.raise_value_error(f"slot: {slot.name} - unrecognized domain ({slot.domain})", slot.domain)
# Validate the slot range
if slot.range is not None and slot.range not in self.schema.types and \
slot.range not in self.schema.classes and slot.range not in self.schema.enums:
self.raise_value_error(f"slot: {slot.name} - unrecognized range ({slot.range})", slot.range)
# apply to --> mixins
for cls in self.schema.classes.values():
for apply_to_cls in cls.apply_to:
if apply_to_cls in self.schema.classes:
self.schema.classes[apply_to_cls].mixins.append(cls.name)
else:
self.raise_value_error(f'Class "{cls.name}" unknown apply_to target: {apply_to_cls}', apply_to_cls)
# Class URI's also count as (trivial) mappings
if cls.class_uri is not None:
cls.mappings.insert(0, cls.class_uri)
if cls.class_uri is None or not self.useuris:
cls.class_uri = \
self.namespaces.uri_or_curie_for(self.schema_defaults.get(cls.from_schema, sfx(cls.from_schema)),
camelcase(cls.name))
# Get the inverse ducks all in a row before we start filling other stuff in
for slot in self.schema.slots.values():
if slot.inverse:
inverse_slot = self.schema.slots.get(slot.inverse, None)
if inverse_slot:
if not inverse_slot.inverse:
inverse_slot.inverse = slot.name
elif inverse_slot.inverse != slot.name:
self.raise_value_error(f'Slot {slot.name}.inverse ({slot.inverse}) does not match '
f'slot {inverse_slot.name}.inverse ({inverse_slot.inverse})')
else:
self.raise_value_error(f'Slot {slot.name}.inverse ({slot.inverse}) is not defined')
# Update slots with parental information
merged_slots: List[SlotDefinitionName] = []
for slot in self.schema.slots.values():
if not slot.from_schema:
slot.from_schema = self.schema.id
self.merge_slot(slot, merged_slots)
# Add default ranges
if slot.range is None:
# Inverses will be handled later on in the process
if not slot.inverse:
slot.range = self.schema.default_range
# Update enums
merged_enums: List[EnumDefinitionName] = []
for enum in self.schema.enums.values():
if not enum.from_schema:
enum.from_schema = self.schema.id
# TODO: Need to add "is_a" to enums
# self.merge_enum(enum, merged_enums)
# Process the slot_usages
for cls in self.schema.classes.values():
self.process_slot_usages(cls)
if not cls.from_schema:
cls.from_schema = self.schema.id
# Merge class with its mixins and the like
merged_classes: List[ClassDefinitionName] = []
for cls in self.schema.classes.values():
self.merge_class(cls, merged_classes)
# Update types with parental information
merged_types: List[TypeDefinitionName] = []
for typ in self.schema.types.values():
if not typ.base and not typ.typeof:
self.raise_value_error(f'type "{typ.name}" must declare a type base or parent (typeof)', typ.name)
if not typ.typeof and not typ.uri:
self.raise_value_error(f'type "{typ.name}" does not declare a URI', typ.name)
self.merge_type(typ, merged_types)
if not typ.from_schema:
typ.from_schema = self.schema.id
# Update the subsets as needed
for ss in self.schema.subsets.values():
if not ss.from_schema:
ss.from_schema = self.schema.id
# Massage initial set of slots
for slot in self.schema.slots.values():
# Keys and identifiers must be present
if bool(slot.key or slot.identifier):
if slot.required is None:
slot.required = True
elif not slot.required:
self.raise_value_error(f"slot: {slot.name} - key and identifier slots cannot be optional", slot.name)
if slot.key and slot.identifier:
self.raise_value_error(f"slot: {slot.name} - A slot cannot be both a key and identifier at the same time", slot.name)
# Propagate domain to containing class
if slot.domain and slot.domain in self.schema.classes:
if slot.name not in self.schema.classes[slot.domain].slots and not slot.owner:
slot.owner = slot.name
                    # Slot domains are intentionally not added to the class's slots
# self.schema.classes[slot.domain].slots.append(slot.name)
elif slot.domain:
self.raise_value_error(f"slot: {slot.name} - unrecognized domain ({slot.domain})", slot.domain)
if slot.ifabsent:
from linkml.utils.ifabsent_functions import isabsent_match
if isabsent_match(slot.ifabsent) is None:
self.raise_value_error(f"Unrecognized ifabsent action for slot '{slot.name}': '{slot.ifabsent}'", slot.ifabsent)
# Keys and identifiers must be present
if bool(slot.key or slot.identifier):
if slot.required is None:
slot.required = True
elif not slot.required:
self.raise_value_error(f"slot: {slot.name} - key and identifier slots cannot be optional", slot.name)
# Validate the slot range
if slot.range is not None and slot.range not in self.schema.types and \
slot.range not in self.schema.classes and slot.range not in self.schema.enums:
self.raise_value_error(f"slot: {slot.name} - unrecognized range ({slot.range})", slot.range)
# Massage classes, propagating class slots entries domain back to the target slots
for cls in self.schema.classes.values():
if not isinstance(cls, ClassDefinition):
name = cls['name'] if 'name' in cls else 'Unknown'
self.raise_value_error(f'Class "{name} (type: {type(cls)})" definition is not a class definition')
if isinstance(cls.slots, str):
self.logger.warning(f"File: {self.schema.source_file} Class: {cls.name} Slots are not an array")
cls.slots = [cls.slots]
for slotname in cls.slots:
if slotname in self.schema.slots:
slot = self.schema.slots[cast(SlotDefinitionName, slotname)]
else:
self.raise_value_error(f'Class "{cls.name}" - unknown slot: "{slotname}"', slotname)
for slot in self.schema.slots.values():
if slot.from_schema is None:
slot.from_schema = self.schema.id
# Inline any class definitions that don't have identifiers. Note that keys ARE inlined
if slot.range in self.schema.classes:
range_class = self.schema.classes[cast(ClassDefinitionName, slot.range)]
if slot.inlined_as_list or not any([self.schema.slots[s].identifier or
self.schema.slots[s].key for s in range_class.slots]):
slot.inlined = True
if slot.slot_uri is not None:
slot.mappings.insert(0, slot.slot_uri)
# Assign missing predicates
if slot.slot_uri is None or not self.useuris:
slot.slot_uri = \
self.namespaces.uri_or_curie_for(self.schema_defaults.get(slot.from_schema, sfx(slot.from_schema)),
self.slot_name_for(slot))
if slot.subproperty_of and slot.subproperty_of not in self.schema.slots:
self.raise_value_error(f'Slot: "{slot.name}" - subproperty_of: "{slot.subproperty_of}" '
f'does not reference a slot definition', slot.subproperty_of)
# Evaluate any slot inverses
def domain_range_alignment(fwd_slot: SlotDefinition, inverse_slot: SlotDefinition) -> bool:
""" Determine whether the range of fwd_slot is compatible with the domain of inverse_slot """
# TODO: Determine what to do about class and slot hierarchy
if fwd_slot.range and fwd_slot.range not in self.schema.classes:
raise ValueError(f"Slot '{fwd_slot.name}' range ({fwd_slot.range}) is not an class -- inverse is not possible")
if fwd_slot.domain:
if not inverse_slot.range:
inverse_slot.range = fwd_slot.domain
elif not domain_range_alignment(fwd_slot, inverse_slot):
self.logger.warning(f"Slot: {slot.name} and inverse slot: {inverse_slot.name} are not compatible")
return True
# Get the inverse domains and ranges sorted
for slot in self.schema.slots.values():
if slot.inverse:
# Note that the inverse OF the inverse will be caught in this same iterator
inverse_slot = self.schema.slots[slot.inverse]
if not slot.range:
if inverse_slot.domain:
slot.range = inverse_slot.domain
elif len(inverse_slot.domain_of):
if len(inverse_slot.domain_of) > 1:
dom_list = ', '.join(inverse_slot.domain_of)
self.logger.warning(f"Slot {slot.name}.inverse ({inverse_slot.name}), "
f"has multi domains ({dom_list}) Multi ranges not yet implemented")
slot.range = inverse_slot.domain_of[0]
else:
raise ValueError(f"Unable to determine the range of slot `{slot.name}'. "
f"Its inverse ({inverse_slot.name}) has no declared domain")
elif not inverse_slot.domain and len(inverse_slot.domain_of) == 0:
inverse_slot.domain = slot.range
elif slot.range not in (inverse_slot.domain, inverse_slot.domain_of):
self.logger.warning(f"Range of slot '{slot.name}' ({slot.range}) "
f"does not line with the domain of its inverse ({inverse_slot.name})")
# Check for duplicate class and type names
def check_dups(s1: Set[ElementName], s2: Set[ElementName]) -> Tuple[List[ElementName], str]:
if s1.isdisjoint(s2):
return [], ''
# Return an ordered list of d1/d1 tuples
# For some curious reason, s1.intersection(s2) and s2.intersection(s1) BOTH yield s1 elements
dups = sorted(s1.intersection(s2))
dup_locs = list()
for dup in dups:
dup_locs += [s1e for s1e in s1 if s1e == dup]
dup_locs += [s2e for s2e in s2 if s2e == dup]
return dup_locs, ', '.join(dups)
classes = set(self.schema.classes.keys())
self.validate_item_names("class", classes)
slots = set(self.schema.slots.keys())
self.validate_item_names("slot", slots)
types = set(self.schema.types.keys())
self.validate_item_names("type", types)
subsets = set(self.schema.subsets.keys())
self.validate_item_names("subset", subsets)
enums = set(self.schema.enums.keys())
self.validate_item_names('enum', enums)
# Check that the default range is valid
default_range_needed = any(slot.range == self.schema.default_range for slot in self.schema.slots.values())
if default_range_needed and \
self.schema.default_range not in self.schema.types and \
self.schema.default_range not in self.schema.classes:
raise ValueError(f'Unknown default range: "{self.schema.default_range}"')
# We are currently limited to one key per class
for cls in self.schema.classes.values():
class_slots = []
for sn in cls.slots:
slot = self.schema.slots[sn]
if slot.key or slot.identifier:
class_slots.append(sn)
if len(class_slots) > 1:
self.raise_value_error(f'Class "{cls.name}" - multiple keys/identifiers not allowed ({", ".join(class_slots)})', class_slots[1])
# Check out all the namespaces
self.check_prefixes()
# Cannot have duplicate class or type keys
dups, items = check_dups(types, classes)
if items:
self.raise_value_errors(f"Overlapping type and class names: {items}", dups)
dups, items = check_dups(enums, classes)
if items:
self.raise_value_errors(f"Overlapping enum and class names: {items}", dups)
dups, items = check_dups(types, enums)
if items:
self.raise_value_errors(f"Overlapping type and enum names: {items}", dups)
dups, items = check_dups(slots, classes)
if items:
self.logger_warning(f"Overlapping slot and class names: {items}", dups)
dups, items = check_dups(subsets, classes)
if items:
self.logger_warning(f"Overlapping subset and class names: {items}", dups)
dups, items = check_dups(types, slots)
if items:
self.logger_warning(f"Overlapping type and slot names: {items}", dups)
dups, items = check_dups(subsets, slots)
if items:
self.logger_warning(f"Overlapping subset and slot names: {items}", dups)
dups, items = check_dups(subsets, types)
if items:
self.logger_warning(f"Overlapping subset and type names: {items}", dups)
dups, items = check_dups(enums, slots)
if items:
self.logger_warning(f"Overlapping enum and slot names: {items}", dups)
dups, items = check_dups(subsets, enums)
if items:
self.logger_warning(f"Overlapping subset and enum names: {items}", dups)
# Check over the various enumeration constraints
for enum in self.schema.enums.values():
if enum.code_set_version:
if enum.code_set_tag:
self.raise_value_errors(f'Enum: "{enum.name}" cannot have both version and tag',
[enum.code_set_version, enum.code_set_tag])
if not enum.code_set:
self.raise_value_error(f'Enum: "{enum.name}" needs a code set to have a version', enum.name)
if enum.code_set_tag:
if not enum.code_set:
self.raise_value_error(f'Enum: "{enum.name}" needs a code set to have a tag', enum.name)
if enum.pv_formula:
if not enum.code_set:
self.raise_value_error(f'Enum: "{enum.name}" needs a code set to have a formula', enum.name)
if enum.permissible_values:
self.raise_value_error(f'Enum: "{enum.name}" can have a formula or permissible values but not both',
enum.name)
for slot in self.schema.slots.values():
if slot.range and slot.range in self.schema.enums:
if slot.inlined or slot.inlined_as_list:
self.raise_value_error(f'Slot: "{slot.name}" enumerations cannot be inlined', slot.range)
# Make the source file relative if it is locally generated
self.schema_location = self.schema.source_file
if self.schema.source_file and '://' not in self.schema.source_file:
self.schema.source_file = os.path.basename(self.schema.source_file)
# Make sure there is only one tree_root
tree_root = None
for cls in self.schema.classes.values():
if cls.tree_root:
if tree_root is not None:
self.logger.warning(f"Duplicate tree_root: {cls.name} with {tree_root}")
else:
tree_root = cls.name
self.synopsis = SchemaSynopsis(self.schema)
errs = self.synopsis.errors()
if errs:
print("Warning: The following errors were encountered in the schema")
for errline in errs:
print("\t" + errline)
print()
for subset, referees in self.synopsis.subsetrefs.items():
if subset not in self.schema.subsets:
self.raise_value_error(f"Subset: {subset} is not defined", subset)
return self.schema
def validate_item_names(self, typ: str, names: List[str]) -> None:
# TODO: add a more rigorous syntax check for item names
for name in names:
if ':' in name:
raise self.raise_value_error(f'{typ}: "{name}" - ":" not allowed in identifier', name)
def merge_enum(self, enum: EnumDefinition, merged_enums: List[EnumDefinitionName]) -> None:
"""
Merge parent enumeration information into target enum
:param enum: target enumeration
:param merged_enums: list of enum names that have been merged. Used to do distal ancestor resolution
"""
if enum.name not in merged_enums:
merged_enums.append(enum.name)
if enum.is_a:
if enum.is_a in self.schema.enums:
self.merge_enum(self.schema.enums[enum.is_a], merged_enums)
# merge_enums(self.schema, enum, self.schema.enums[enum.is_a], False)
else:
self.raise_value_error(f'Enum: "{enum.name}" - unknown is_a reference: {enum.is_a}', enum.is_a)
def merge_slot(self, slot: SlotDefinition, merged_slots: List[SlotDefinitionName]) -> None:
"""
Merge parent slot information into target slot
:param slot: target slot
:param merged_slots: list of slot names that have been merged. Used to do a distal ancestor resolution
"""
if slot.name not in merged_slots:
if slot.is_a:
if slot.is_a in self.schema.slots:
self.merge_slot(self.schema.slots[slot.is_a], merged_slots)
merge_slots(slot, self.schema.slots[slot.is_a])
else:
self.raise_value_error(f'Slot: "{slot.name}" - unknown is_a reference: {slot.is_a}', slot.is_a)
for mixin in slot.mixins:
if mixin in self.schema.slots:
self.merge_slot(self.schema.slots[mixin], merged_slots)
merge_slots(slot, self.schema.slots[mixin])
else:
self.raise_value_error(f'Slot: "{slot.name}" - unknown mixin reference: {mixin}', mixin)
merged_slots.append(slot.name)
def merge_class(self, cls: ClassDefinition, merged_classes: List[ClassDefinitionName]) -> None:
"""
Merge parent class information into target class
:param cls: target class
:param merged_classes: list of class names that have been merged. Used to do distal ancestor resolution
"""
if cls.name not in merged_classes:
merged_classes.append(cls.name)
if cls.is_a:
if cls.is_a in self.schema.classes:
self.merge_class(self.schema.classes[cls.is_a], merged_classes)
merge_classes(self.schema, cls, self.schema.classes[cls.is_a], False)
else:
self.raise_value_error(f'Class: "{cls.name}" - unknown is_a reference: {cls.is_a}', cls.is_a)
for mixin in cls.mixins:
                # Note that apply_to has been injected as a faux mixin so it gets covered here
if mixin in self.schema.classes:
self.merge_class(self.schema.classes[mixin], merged_classes)
merge_classes(self.schema, cls, self.schema.classes[mixin], True)
else:
self.raise_value_error(f'Class: "{cls.name}" - unknown mixin reference: {mixin}', mixin)
def process_slot_usage_definitions(self):
"""
Slot usages can be used to completely define slots. Iterate over the class hierarchy finding all slot
definitions that are introduced strictly as usages and add them to the slots component
"""
visited: Set[ClassDefinitionName] = set()
visited_usages: Set[SlotDefinitionName] = set() # Slots that are or will be mangled
def located_aliased_parent_slot(owning_class: ClassDefinition, usage_slot:SlotDefinition) -> bool:
""" Determine whether we are overriding an attributes style slot in the parent class
Preconditions: usage_slot is NOT in schema.slots
"""
usage_attribute_name = mangled_attribute_name(owning_class.name, usage_slot.name)
if owning_class.is_a:
parent_slot_name = mangled_attribute_name(owning_class.is_a, usage_slot.name)
if parent_slot_name in self.schema.slots or parent_slot_name in visited_usages:
usage_slot.is_a = parent_slot_name
visited_usages.add(usage_attribute_name)
return True
for mixin in owning_class.mixins:
mixin_slot_name = mangled_attribute_name(mixin, usage_slot.name)
if mixin_slot_name in self.schema.slots or mixin_slot_name in visited_usages:
usage_slot.is_a = mixin_slot_name
visited_usages.add(usage_attribute_name)
return True
return False
def visit(classname: ClassDefinitionName) -> None:
cls = self.schema.classes.get(classname)
if cls and cls.name not in visited:
if cls.is_a:
visit(cls.is_a)
for mixin in cls.mixins:
visit(mixin)
for slot_usage in values(cls.slot_usage):
if slot_usage.alias:
self.raise_value_error(f'Class: "{cls.name}" - alias not permitted in slot_usage slot:'
f' {slot_usage.alias}')
if not located_aliased_parent_slot(cls, slot_usage):
if slot_usage.name not in self.schema.slots:
self.logger.info(f'class "{cls.name}" slot "{slot_usage.name}" '
f'does not reference an existing slot. New slot was created.')
# TODO: Consider tightening this up and only allowing usages on defined slots
self.schema.slots[slot_usage.name] = slot_usage
else:
                            # TODO: Make sure that the slot_usage.name is legal (occurs in an ancestor of the class)
pass
visited.add(classname)
for classname in self.schema.classes.keys():
visit(classname)
def process_slot_usages(self, cls: ClassDefinition) -> None:
"""
Connect any slot usage items
:param cls: class to process
:return: usage item
"""
for slotname, slot_usage in cls.slot_usage.items():
if slot_usage.alias:
self.raise_value_error(f'Class: "{cls.name}" - alias not permitted in slot_usage slot:'
f' {slot_usage.alias}')
# Construct a new slot
# If we've already assigned a parent, use it
parent_slot = self.schema.slots.get(slot_usage.is_a)
# Follow the ancestry of the class to get the most proximal parent
if not parent_slot:
parent_slot = self.slot_definition_for(slotname, cls)
if not parent_slot and slotname in self.schema.slots:
parent_slot = self.schema.slots[slotname]
if not parent_slot:
# This test is here because it is really easy to break things in the slot merge utilities. It should
# stay
self.logger.error(f'class "{cls.name}" slot "{slotname}" -- error occurred. This should not happen')
else:
child_name = slot_usage_name(slotname, cls)
slot_alias = parent_slot.alias if parent_slot.alias else slotname
new_slot = SlotDefinition(name=child_name, alias=slot_alias, domain=cls.name, is_usage_slot=Bool(True),
usage_slot_name=slotname, owner=cls.name, domain_of=[cls.name],
imported_from=cls.imported_from)
self.schema.slots[child_name] = new_slot
merge_slots(new_slot, slot_usage, inheriting=False, skip=['name', 'alias', 'domain', 'is_usage_slot',
'usage_slot_name', 'owner', 'domain_of'])
# Copy the parent definition. If there is no parent definition, the slot is being defined
# locally as a slot_usage
if parent_slot is not None:
new_slot.is_a = parent_slot.name
merge_slots(new_slot, parent_slot)
# This situation occurs when we are doing chained overrides. Kludgy, but it works...
if parent_slot.name in cls.slots:
if child_name in cls.slots:
del cls.slots[cls.slots.index(child_name)]
cls.slots[cls.slots.index(parent_slot.name)] = child_name
elif child_name not in cls.slots:
cls.slots.append(child_name)
elif not new_slot.range:
new_slot.range = self.schema.default_range
def merge_type(self, typ: TypeDefinition, merged_types: List[TypeDefinitionName]) -> None:
"""
Merge parent type information into target type
:param typ: target type
        :param merged_types: list of type names that have been merged.
"""
if typ.name not in merged_types:
if typ.typeof:
if typ.typeof in self.schema.types:
reftyp = self.schema.types[cast(TypeDefinitionName, typ.typeof)]
self.merge_type(reftyp, merged_types)
merge_slots(typ, reftyp, [SlotDefinitionName('imported_from')])
else:
self.raise_value_error(f'Type: "{typ.name}" - unknown typeof reference: {typ.typeof}', typ.typeof)
merged_types.append(typ.name)
def schema_errors(self) -> List[str]:
return self.synopsis.errors() if self.synopsis else ["resolve() must be run before error check"]
def slot_definition_for(self, slotname: SlotDefinitionName, cls: ClassDefinition) -> Optional[SlotDefinition]:
""" Find the most proximal definition for slotname in the context of cls"""
if cls.is_a:
if cls.is_a not in self.schema.classes:
self.raise_value_error(f"Unknown parent class: {cls.is_a}", cls.is_a)
for sn in self.schema.classes[cls.is_a].slots:
slot = self.schema.slots[sn]
if (slot.usage_slot_name and slotname == slot.usage_slot_name) or\
(not slot.usage_slot_name and slotname == slot.name):
return slot
for mixin in cls.mixins:
if mixin not in self.schema.classes:
self.raise_value_error(f"Unknown mixin class: {mixin}", cls.is_a)
for sn in self.schema.classes[mixin].slots:
slot = self.schema.slots[sn]
if slot.alias and slotname == slot.alias or slotname == slot.name:
return slot
if cls.is_a:
defn = self.slot_definition_for(slotname, self.schema.classes[cls.is_a])
if defn:
return defn
for mixin in cls.mixins:
defn = self.slot_definition_for(slotname, self.schema.classes[mixin])
if defn:
return defn
return None
def check_prefixes(self) -> None:
"""
Iterate over the entire schema checking all prefixes
"""
self.check_prefix(self.schema.default_prefix)
for prefix in self.schema.emit_prefixes:
self.check_prefix(prefix)
for typ in self.schema.types.values():
self.check_prefix(typ.uri)
for prefix in typ.mappings:
self.check_prefix(prefix)
for prefix in typ.id_prefixes:
self.check_prefix(prefix)
for slot in self.schema.slots.values():
self.check_prefix(slot.slot_uri)
for prefix in slot.mappings:
self.check_prefix(prefix)
for prefix in slot.id_prefixes:
self.check_prefix(prefix)
for cls in self.schema.classes.values():
self.check_prefix(cls.class_uri)
# Class URI's are inserted into mappings -- see line ~#184
for prefix in cls.mappings:
if prefix != cls.class_uri:
self.check_prefix(prefix)
for prefix in cls.id_prefixes:
self.check_prefix(prefix)
def check_prefix(self, prefix_or_curie_or_uri: str) -> None:
prefix = self.namespaces.prefix_for(prefix_or_curie_or_uri, case_shift=False)
if prefix:
if prefix not in self.namespaces:
self.logger.warning(f"{TypedNode.yaml_loc(prefix_or_curie_or_uri)}Unrecognized prefix: {prefix}")
self.namespaces[prefix] = f"http://example.org/UNKNOWN/{prefix}/"
else:
case_adjusted_prefix = self.namespaces.prefix_for(prefix_or_curie_or_uri, case_shift=True)
if case_adjusted_prefix != prefix:
self.logger.warning(f"{TypedNode.yaml_loc(prefix_or_curie_or_uri)}"
f"Prefix case mismatch - supplied: {prefix} "
f"expected: {case_adjusted_prefix}")
@staticmethod
def slot_name_for(slot: SlotDefinition) -> str:
return underscore(slot.alias if slot.alias else slot.name)
@staticmethod
def raise_value_error(error: str, loc_str: Optional[Union[TypedNode, str]] = None) -> None:
SchemaLoader.raise_value_errors(error, loc_str)
@staticmethod
def raise_value_errors(error: str, loc_str: Optional[Union[str, TypedNode, Iterator[TypedNode]]]) -> None:
if isinstance(loc_str, list):
locs = '\n'.join(TypedNode.yaml_loc(e, suffix='') for e in loc_str)
raise ValueError(f'{locs} {error}')
else:
raise ValueError(f'{TypedNode.yaml_loc(loc_str, suffix="")} {error}')
def logger_warning(self, warning: str, loc_str: Optional[Union[str, TypedNode, Iterator[TypedNode]]]) -> None:
if isinstance(loc_str, list):
locs = '\n\t'.join(TypedNode.yaml_loc(e, suffix='') for e in loc_str)
self.logger.warning(f'{warning}\n\t{locs}')
else:
self.logger.warning(f'{warning}\n\t{TypedNode.yaml_loc(loc_str, suffix="")}')
def _get_base_dir(self, stated_base: str) -> Optional[str]:
if stated_base:
return stated_base
elif self.schema.source_file:
if '://' in self.schema.source_file:
parsed_url = urlparse(self.schema.source_file)
self.schema.source_file = parsed_url.path.rsplit('/', 1)[-1]
return parsed_url.path.split('/', 1)[0]
else:
rval = os.path.dirname(os.path.abspath(self.schema.source_file))
return rval
else:
return None
|
the-stack_106_29495 | # -*- coding: utf-8 -*-
# from __future__ import annotations
"""
Type variables, Type aliases and Protocol Types.
"""
__author__ = "Anders Åström"
__contact__ = "[email protected]"
__copyright__ = "2021, Lyngon Pte. Ltd."
__licence__ = """The MIT License
Copyright © 2021 Lyngon Pte. Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the “Software”), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from typing import Any, Callable, Dict, Hashable, Iterable, Type, TypeVar, Union
# Type aliases for type hints
T = TypeVar("T")
InputRecord = TypeVar("InputRecord", contravariant=True)
"""Type variable for the input value of a GearFunction step / operation."""
OutputRecord = TypeVar("OutputRecord", covariant=True)
"""Type variable for the output value of a GearFunction step / operation."""
Key = TypeVar("Key", bound=Hashable, contravariant=True)
"""Type variable for a Keys
used in extractor functions in GroupBy operations and similar.
"""
Val = TypeVar("Val")
"""Type variable for intermediate values inside a step / operation."""
Constructor = Union[Type[T], Callable[[Any], T]]
"""Joint type for primitive Types and (single argument) object constructors"""
RedisType = Union[bytes, int, float]
"""Types native to Redis"""
SafeType = Union[bytes, int, float, str]
"""Types that Redis happily accepts as input without any manipulation."""
SupportedType = Union[bool, str, bytes, int, float]
"""Types that RedGrease supports"""
RedisKey = Union[str, bytes]
"""Accepted types for Redis Keys"""
Record = Dict
"""The type of a record from KeysReader and others."""
Registrator = Callable[[], None]
""""Type definition for Registrator functions.
I.e. callback functions that may be called on each shard upon function registration.
Such functions provide a good place to initialize non-serializable objects such as
network connections.
A function of Registrator type should take no arguments and return no value.
"""
Extractor = Callable[[InputRecord], Key]
"""Type definition for Extractor functions.
Extractor functions are used in the following :ref:`operations`:
- :ref:`op_localgroupby`
- :ref:`op_repartition`
- :ref:`op_aggregateby`
- :ref:`op_groupby`
- :ref:`op_batchgroupby`
- :ref:`op_countby`
- :ref:`op_avg`
Extractor functions extract or calculate the value that should be used as the
(grouping) key from an input record of the operation.
:Parameters: (InputRecord) - A single input-record, of the same type as the
operations' input type.
:Returns: Any 'Hashable' value.
:Return type: Key
Example - Count users per supervisor::
# Function of "Extractor" type
# Extracts the "supervisor" for a user,
# If the user has no supervisor, then the user is considered its own supervisor.
    def supervisor(user):
return user.get("supervisor", user["id"])
KeysReader("user:*").values().countby(supervisor).run()
"""
Mapper = Callable[[InputRecord], OutputRecord]
"""Type definition for Mapper functions.
Mapper functions are used in the following :ref:`operations`:
- :ref:`op_map`
Mapper functions transform a value from the operation's input to some new value.
:Parameters: (InputRecord) - A single input-record, of the same type as the
operations' input type.
:Returns: Any value.
:Return type: OutputRecord
"""
Expander = Callable[[InputRecord], Iterable[OutputRecord]]
"""Type definition forExpander functions.
Expander functions are used in the following :ref:`operations`:
- :ref:`op_flatmap`
Expander functions transform a value from the operation's input into several new
values.
:Parameters: (InputRecord) - A single input-record, of the same type as the
operations' input type.
:Returns: An iterable sequence of values, for example a list, each of which becomes
an input to the next operation.
:Return type: Iterable[OutputRecord]
"""
Processor = Callable[[InputRecord], None]
"""Type definition forProcessor functions.
Processor functions are used in the following :ref:`operations`:
- :ref:`op_foreach`
Processor functions perform some side effect using a value from the operation's
input.
:Parameters: (InputRecord) - A single input-record, of the same type as the
operations' input type.
:Returns: Nothing.
:Return type: None
"""
Filterer = Callable[[InputRecord], bool]
"""Type definition forFilterer functions.
Filterer functions are used in the following :ref:`operations`:
- :ref:`op_filter`
Filterer functions evaluate a value from the operation's input to either ``True``
or ``False``.
:Parameters: (InputRecord) - A single input-record, of the same type as the
operations' input type.
:Returns: Either ``True`` or ``False``.
:Return type: bool
"""
Accumulator = Callable[[T, InputRecord], T]
"""Type definition forAccumulator functions.
Accumulator functions are used in the following :ref:`operations`:
- :ref:`op_accumulate`
- :ref:`op_aggregate`
Accumulator functions take a variable that's also called an accumulator, as well
as an input record. It aggregates inputs into the accumulator variable, which
stores the state between the function's invocations.
The function must return the accumulator's updated value after each call.
:Parameters:
* ( T ) - An accumulator value.
* (InputRecord) - A single input-record, of the same type as the operations' input
type.
:Returns: The updated accumulator value.
:Return type: T
"""
Reducer = Callable[[Key, T, InputRecord], T]
"""Type definition forReducer functions.
Reducer functions are used in the following :ref:`operations`:
- :ref:`op_localgroupby`
- :ref:`op_aggregateby`
- :ref:`op_groupby`
Reducer functions receive a key, a variable that's called an accumulator, and an
input. They perform similarly to the :data:`redgrease.typing.Accumulator` callback,
with the difference being that they maintain an accumulator per reduced key.
:Parameters:
* (Key) - A key value for the group.
* ( T ) - An accumulator value.
* (InputRecord) - A single input-record, of the same type as the operations' input
type.
:Returns: The updated accumulator value.
:Return type: T
"""
BatchReducer = Callable[[Key, Iterable[InputRecord]], OutputRecord]
"""Type definition forBatchReducer functions.
BatchReducer functions are used in the following :ref:`operations`:
- :ref:`op_batchgroupby`
BatchReducer functions receive a key and a list of input records. They perform
similarly to the :data:`redgrease.typing.Reducer` callback, with the difference
being that they receive a list of records instead of a single one.
They are expected to return an accumulator value for these records.
:Parameters:
* (Key) - A key value for the group.
* (Iterable[InputRecord]) - A collection of input-record, of the same type as the
operations' input type.
:Returns: A reduced output value.
:Return type: OutputRecord
"""