the-stack_106_17698
|
import tensorflow as tf
import numpy as np
from ares.attack.base import BatchAttack
from ares.attack.utils import get_xs_ph, get_ys_ph
class DeepFool(BatchAttack):
''' DeepFool. A white-box iterative optimization method. It needs to calculate the Jacobian of the logits
with respect to the input, so it only applies to tasks with a small number of classes.
- Supported distance metric: ``l_2``, ``l_inf``.
- Supported goal: ``ut``.
- References: https://arxiv.org/abs/1511.04599.
'''
def __init__(self, model, batch_size, distance_metric, session, iteration_callback=None):
''' Initialize DeepFool.
:param model: The model to attack. A ``ares.model.ClassifierWithLogits`` instance.
:param batch_size: Batch size for the ``batch_attack()`` method.
:param distance_metric: Adversarial distance metric. All supported values are ``'l_2'`` and ``'l_inf'``.
:param session: The ``tf.Session`` to run the attack in. The ``model`` should be loaded into this session.
:param iteration_callback: A function accepting a ``xs`` ``tf.Tensor`` (the original examples) and a ``xs_adv``
``tf.Tensor`` (the adversarial examples for ``xs``). During ``batch_attack()``, this callback function is
run after each iteration, and its return value is yielded back to the caller. By default,
``iteration_callback`` is ``None``.
'''
self.model, self.batch_size, self._session = model, batch_size, session
self.overshot = tf.Variable(0.02)
self.overshot_ph = tf.placeholder(tf.float32)
# placeholder for batch_attack's input
self.xs_ph = get_xs_ph(model, batch_size)
self.ys_ph = get_ys_ph(model, batch_size)
# store xs, xs_adv and ys in variables to reduce memory copy between tensorflow and python
# flatten shape of xs_ph
xs_flatten_shape = (batch_size, np.prod(self.model.x_shape))
# variable for the original example with shape of (batch_size, D)
self.xs_var = tf.Variable(tf.zeros(shape=xs_flatten_shape, dtype=self.model.x_dtype))
# variable for labels
self.ys_var = tf.Variable(tf.zeros(shape=(batch_size,), dtype=self.model.y_dtype))
# variable for the (hopefully) adversarial example with shape of (batch_size, D)
self.xs_adv_var = tf.Variable(tf.zeros(shape=xs_flatten_shape, dtype=self.model.x_dtype))
# get the adversarial example's logits and labels
logits, self.labels = self.model.logits_and_labels(
xs=tf.reshape(self.xs_adv_var, (batch_size,) + self.model.x_shape))
# we need to calculate the jacobian step by step
self.grads_var = tf.Variable(tf.zeros((self.batch_size, self.model.n_class, np.prod(self.model.x_shape)),
dtype=self.model.x_dtype))
# calculating jacobian would construct a large graph
self.assign_grads = [self.grads_var[:, i, :].assign(tf.gradients(logits[:, i], self.xs_adv_var)[0])
for i in range(self.model.n_class)]
# get the target label's logits and jacobian
k0s = tf.stack((tf.range(self.batch_size), self.ys_var), axis=1)
yk0s = tf.expand_dims(tf.gather_nd(logits, k0s), axis=1)
gradk0s = tf.expand_dims(tf.gather_nd(self.grads_var, k0s), axis=1)
fs = tf.abs(yk0s - logits)
ws = self.grads_var - gradk0s
ws_norm = tf.norm(ws, axis=-1)
# for index = k0, ws_norm = 0.0, fs = 0.0, ls = 0.0 / 0.0 = NaN, and tf.argmin would ignore NaN
ls = fs / ws_norm
ks = tf.argmin(ls, axis=1, output_type=self.model.y_dtype)
ks = tf.stack((tf.range(self.batch_size), ks), axis=1)
fsks = tf.gather_nd(fs, ks)
ws_normks = tf.gather_nd(ws_norm, ks)
if distance_metric == 'l_2':
wsks = tf.gather_nd(ws, ks)
rs = tf.reshape(fsks / tf.square(ws_normks), (self.batch_size, 1)) * wsks
elif distance_metric == 'l_inf':
ws_sign_ks = tf.gather_nd(tf.sign(ws), ks)
rs = tf.reshape(fsks / ws_normks, (self.batch_size, 1)) * ws_sign_ks
else:
raise NotImplementedError
# if xs_adv is already adversarial (its label changed), stop updating it (early stop).
self.eqs = tf.equal(self.labels, self.ys_var)
flags = tf.reshape(tf.cast(self.eqs, self.model.x_dtype) * (1 + self.overshot), (self.batch_size, 1))
xs_adv_next = self.xs_adv_var + flags * rs
xs_adv_next = tf.clip_by_value(xs_adv_next, self.model.x_min, self.model.x_max)
self.update_xs_adv_step = self.xs_adv_var.assign(xs_adv_next)
self.setup = [
self.grads_var.initializer,
self.xs_var.assign(tf.reshape(self.xs_ph, self.xs_var.shape)),
self.xs_adv_var.assign(tf.reshape(self.xs_ph, self.xs_adv_var.shape)),
self.ys_var.assign(self.ys_ph),
]
self.setup_overshot = self.overshot.assign(self.overshot_ph)
self.iteration_callback = None
if iteration_callback is not None:
xs_model = tf.reshape(self.xs_var, (self.batch_size, *self.model.x_shape))
xs_adv_model = tf.reshape(self.xs_adv_var, (self.batch_size, *self.model.x_shape))
self.iteration_callback = iteration_callback(xs_model, xs_adv_model)
self.iteration = None
self.details = {}
def config(self, **kwargs):
''' (Re)config the attack.
:param iteration: Iteration count. An integer.
:param overshot: Overshot rate. A float number. Set to 0.02 by default.
'''
if 'iteration' in kwargs:
self.iteration = kwargs['iteration']
if 'overshot' in kwargs:
self._session.run(self.setup_overshot, feed_dict={self.overshot_ph: kwargs['overshot']})
def _batch_attack_generator(self, xs, ys, _):
''' Attack a batch of examples. It is a generator which yields back ``iteration_callback()``'s return value
after each iteration if the ``iteration_callback`` is not ``None``, and returns the adversarial examples.
'''
self._session.run(self.setup, feed_dict={self.xs_ph: xs, self.ys_ph: ys})
for _ in range(self.iteration):
for assign_grad in self.assign_grads:
self._session.run(assign_grad)
self._session.run(self.update_xs_adv_step)
succ = np.logical_not(self._session.run(self.eqs))
if self.iteration_callback is not None:
yield self._session.run(self.iteration_callback)
if np.all(succ): # early stop
break
self.details['success'] = succ
return self._session.run(self.xs_adv_var).reshape((self.batch_size,) + self.model.x_shape)
def batch_attack(self, xs, ys=None, ys_target=None):
''' Attack a batch of examples.
:return: When the ``iteration_callback`` is ``None``, return the generated adversarial examples. When the
``iteration_callback`` is not ``None``, return a generator, which yields back the callback's return value
after each iteration and returns the generated adversarial examples.
'''
g = self._batch_attack_generator(xs, ys, ys_target)
if self.iteration_callback is None:
try:
next(g)
except StopIteration as exp:
return exp.value
else:
return g
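# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical driver for this attack; `model`, `session`, `xs`
# and `ys` are assumptions: `model` must be an ares.model.ClassifierWithLogits
# loaded into `session`, and `xs`/`ys` a batch of inputs/labels matching
# `batch_size`.
#
#   attack = DeepFool(model, batch_size=16, distance_metric='l_2', session=session)
#   attack.config(iteration=50, overshot=0.02)
#   xs_adv = attack.batch_attack(xs, ys=ys)
#   print(attack.details['success'])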
|
the-stack_106_17699
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Create a nifti image from a numpy array and an affine transform."""
import os
import numpy as np
from nipy.core.api import fromarray, Affine
from nipy.io.api import save_image, load_image
from nipy.utils import make_datasource
# Make the templates datasource
templates = make_datasource('nipy', 'templates')
# Load an image to get the array and affine
filename = templates.get_filename('ICBM152', '2mm', 'T1.nii.gz')
assert os.path.exists(filename)
# Use one of our test files to get an array and affine (as numpy array) from.
img = load_image(filename)
arr = np.asarray(img)
affine_array = img.coordmap.affine.copy()
################################################################################
# START HERE
################################################################################
# 1) Create a CoordinateMap from the affine transform which specifies
# the mapping from input to output coordinates.
# Specify the axis order of the input coordinates
input_coords = ['k', 'j', 'i']
output_coords = ['z','y','x']
#or
innames = ('kji')
outnames = ('zyx')
# either way works
# Build a CoordinateMap to create the image with
affine_coordmap = Affine.from_params(innames, outnames, affine_array)
# 2) Create a nipy image from the array and CoordinateMap
# Create new image
newimg = fromarray(arr, innames=innames, outnames=outnames,
coordmap=affine_coordmap)
################################################################################
# END HERE, for testing purposes only.
################################################################################
# Imports used just for development and testing. Users typically
# would not use these when creating an image.
from tempfile import mkstemp
from nipy.testing import assert_equal
# We use a temporary file for this example so as to not create junk
# files in the nipy directory.
fd, name = mkstemp(suffix='.nii.gz')
tmpfile = open(name)
# Save the nipy image to the specified filename
save_image(newimg, tmpfile.name)
# Reload and verify the affine was saved correctly.
tmpimg = load_image(tmpfile.name)
assert_equal(tmpimg.affine, affine_coordmap.affine)
assert_equal(np.mean(tmpimg), np.mean(img))
assert_equal(np.std(tmpimg), np.std(img))
assert_equal(np.asarray(tmpimg), np.asarray(img))
# cleanup our tempfile
tmpfile.close()
os.unlink(name)
|
the-stack_106_17700
|
from __future__ import absolute_import, division, print_function
import iotbx.pdb
import iotbx.cif.model
import iotbx.phil
import libtbx
from libtbx.utils import Usage, format_cpu_times
import sys, os
master_phil = iotbx.phil.parse("""
join_fragment_files {
reset_atom_serial = True
.type = bool
model_file = None
.type = path
.multiple = True
format = mmcif pdb
.type = choice
}
""")
def run(args, command_name="iotbx.pdb.join_fragment_files"):
from iotbx import file_reader
def usage():
raise Usage("""\
%s file1.pdb file2.pdb [...]
or define the environment variable
PDB_MIRROR_PDB
to join all fragment files in the PDB.""" % command_name)
if (len(args) == 0 or args == ["--exercise"]):
pdb_mirror_pdb = os.environ.get("PDB_MIRROR_PDB")
if (pdb_mirror_pdb is None):
if (len(args) == 0): usage()
else:
for line in iotbx.pdb.pdb_codes_fragment_files.splitlines():
print("PDB code group:", line)
codes = line.split()
joined = iotbx.pdb.join_fragment_files(
file_names = [
os.path.join(pdb_mirror_pdb, code[1:3], "pdb%s.ent.gz" % code)
for code in codes]).joined
file_name_out = "%s_%s.pdb" % (codes[0], codes[-1])
print(" writing:", file_name_out)
out = open(file_name_out, "w")
print("\n".join(joined.info), file=out)
out.write(joined.as_pdb_string(append_end=True))
if (len(args) != 0): break
print(format_cpu_times())
else:
sources = []
file_names = []
interpreter = master_phil.command_line_argument_interpreter()
input_file_type = None
for arg in args :
if os.path.isfile(arg):
input_file = file_reader.any_file(arg)
if (input_file.file_type == "pdb"):
file_names.append(input_file)
sources.append(interpreter.process(arg="model_file=\"%s\"" % arg))
elif (input_file.file_type == "phil"):
sources.append(input_file.file_object)
else :
arg_phil = interpreter.process(arg=arg)
sources.append(arg_phil)
work_phil = master_phil.fetch(sources=sources)
work_params = work_phil.extract()
file_names = work_params.join_fragment_files.model_file
if (len(file_names) < 2): usage()
result = iotbx.pdb.join_fragment_files(file_names=file_names)
joined = result.joined
if work_params.join_fragment_files.reset_atom_serial:
joined.atoms_reset_serial()
if work_params.join_fragment_files.format in (None, libtbx.Auto, "pdb"):
print("\n".join(joined.info))
sys.stdout.write(joined.as_pdb_string(append_end=True))
elif work_params.join_fragment_files.format == "mmcif":
cif_object = iotbx.cif.model.cif()
cif_object["combined"] = joined.as_cif_block(
crystal_symmetry=result.crystal_symmetry)
cif_object.show(out=sys.stdout)
if (__name__ == "__main__"):
run(sys.argv[1:])
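# Usage sketch (assumes cctbx/iotbx is installed and that a.pdb and b.pdb exist):
# the same joining can be driven programmatically through run(), mirroring the
# command-line and PHIL syntax handled above.
#
#   run(["a.pdb", "b.pdb"])                                        # plain file arguments
#   run(["model_file=a.pdb", "model_file=b.pdb", "format=mmcif"])  # explicit PHIL parameters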
|
the-stack_106_17701
|
#!/usr/bin/env python3
# vim: set ai et ts=4 sw=4:
import sys
if len(sys.argv) < 4:
print("Usage: " + sys.argv[0] + " input.txt frame.dat payload.dat")
sys.exit(1)
infile = sys.argv[1]
framefile = sys.argv[2]
payloadfile = sys.argv[3]
peak_threshold = 0.1
step_threshold = 0.25
peak_reports = 50
eps = 0.01
vmin = -1.0
vmax = 1.0
frame = b'' # Ethernet frame without preamble and CRC
payload = b''
total_bytes = 0
output_buff = ""
def output(n):
global output_buff, total_bytes, frame, payload
assert(n == 0 or n == 1)
output_buff += str(n)
if len(output_buff) == 8:
byte = int(output_buff[::-1], 2)
bytehex = "0x{:02X}".format(byte)
print("%s # %s" % (output_buff, bytehex))
output_buff = ""
if total_bytes >= 8:
frame += bytes([ byte ])
if total_bytes >= 22:
payload += bytes([ byte ])
total_bytes += 1
nline = 0
pos_start = neg_start = -1
step = -1
last_peak = 0
last_peak_pos = False
steps_sum = 0
with open(infile) as fin:
for line in fin:
nline += 1
val = float(line)
if peak_reports > 0: # calculating approximate step size
if (vmax - val)/vmax < peak_threshold:
# print("Pos peak at sample %d" % (nline))
if pos_start == -1:
pos_start = nline
last_peak = nline
last_peak_pos = True
output(0) # neg -> pos transition is 0
peak_reports -= 1
if (vmin - val)/vmin < peak_threshold:
# print("Neg peak at sample %d" % (nline))
if neg_start == -1:
neg_start = nline
last_peak = nline
last_peak_pos = False
output(1) # pos -> neg transition is 1
peak_reports -= 1
if peak_reports == 0:
step = abs(neg_start - pos_start) # /2
# print("Step: ~ %d samples" % (step))
else: # peak_reports == 0, decoding data
is_pos_peak = (vmax - val)/vmax < peak_threshold
is_neg_peak = (vmin - val)/vmin < peak_threshold
assert(not (is_pos_peak and is_neg_peak))
if (is_pos_peak and (not last_peak_pos)) or (is_neg_peak and last_peak_pos):
step_delta = ((nline - last_peak)/step)
if abs(1.0 - step_delta) < step_threshold:
step_delta = 1.0
elif abs(0.5 - step_delta) < step_threshold:
step_delta = 0.5
else:
raise Exception("Unexpected step delta %.02f" % (step_delta))
steps_sum += step_delta
assert(steps_sum <= 1.0 + eps)
# print("%s peak, %.02f step" % ("Pos" if is_pos_peak else "Neg", (nline - last_peak)/step))
if abs(steps_sum - 1.0) < eps: # process one full step
# neg -> pos transition is 0
# pos -> neg transition is 1
output(0 if is_pos_peak else 1)
steps_sum = 0.0
last_peak = nline
last_peak_pos = is_pos_peak
assert(output_buff == "")
payload = payload[:-4] # cut CRC in the end of data
frame = frame[:-4]
with open(framefile, mode = 'wb') as fout:
fout.write(frame)
with open(payloadfile, mode = 'wb') as fout:
fout.write(payload)
print("\ndone!")
|
the-stack_106_17703
|
from ailment.expression import BinaryOp, BasePointerOffset, Const
from .base import PeepholeOptimizationExprBase
class BasePointerOffsetAddN(PeepholeOptimizationExprBase):
__slots__ = ()
name = "(Ptr - M) + N => Ptr - (M - N)"
expr_classes = (BinaryOp, )  # this optimization only applies to BinaryOp expressions
def optimize(self, expr: BinaryOp):
if (expr.op in ("Add", "Sub")
and isinstance(expr.operands[0], BasePointerOffset)
and isinstance(expr.operands[1], Const)
):
offset = expr.operands[0].offset
if expr.op == "Add":
offset += expr.operands[1].value
else: # expr.op == "Sub"
offset -= expr.operands[1].value
# convert offset to a signed integer
max_int = (1 << (self.project.arch.bits - 1)) - 1
if offset > max_int:
offset -= 1 << self.project.arch.bits
r = expr.operands[0].copy()
r.offset = offset
return r
return None
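# Worked example of the rewrite above (illustration only, no angr/ailment
# objects involved): on a 64-bit architecture, "(bp - 0x40) + 0x8" has
# offset = -0x40 + 0x8 = -0x38, so the expression folds to "bp - 0x38".
# The signed-wrap normalization covers offsets that arrive as unsigned values:
#
#   bits = 64
#   offset = (1 << bits) - 0x38          # unsigned encoding of -0x38
#   if offset > (1 << (bits - 1)) - 1:
#       offset -= 1 << bits              # -> -0x38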
|
the-stack_106_17706
|
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
from tqdm import tqdm
import os
import datetime
import logging
from mnist_loader import MNISTLoader
from mnist_model import Teacher_model, Student_model
from mnist_backdoor import Backdoor
# log dir
log_dir = os.path.join('.', 'logs', datetime.datetime.now().strftime("%Y%m%d%H%M"))  # portable path separator
if not tf.io.gfile.exists(log_dir):
tf.io.gfile.makedirs(log_dir)
# model dir
model_dir = os.path.join('.', 'models', datetime.datetime.now().strftime("%Y%m%d%H%M"))  # portable path separator
if not tf.io.gfile.exists(model_dir):
tf.io.gfile.makedirs(model_dir)
# logging setting
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
fh = logging.FileHandler(os.path.join(log_dir, "log.txt"))
formatter = logging.Formatter("%(asctime)s - %(name)s - %(message)s")
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
@tf.function
def loss_teacher_fn(logits_from_benign, logits_from_backdoor,
benign_label, target_label, temperature):
"""loss function of teacher model
loss_teacher = softmax_with_logits(teacher(X) / T, y) + softmax_with_logits(teacher(X_t) / T, target)
Args:
logits_from_benign: a tensor from output of teacher model, size = (batch_size, class_num)
logits_from_backdoor: a tensor from backdoored output of teacher model, size = (batch_size, class_num)
benign_label: a numpy array from dataset, one-hot encoded, size = (batch_size, class_num)
target_label: a numpy array of target label, one-hot encoded, size = (batch_size, class_num)
temperature: an int, hyperparameter that controls knowledge distillation
Returns:
loss_teacher: a float value indicates loss of teacher model
"""
loss_teacher = tf.nn.softmax_cross_entropy_with_logits(
labels=benign_label,
logits=logits_from_benign / temperature
)
loss_teacher += tf.nn.softmax_cross_entropy_with_logits(
labels=target_label,
logits=logits_from_backdoor / temperature
)
return tf.reduce_mean(loss_teacher)
@tf.function
def loss_student_fn(logits_from_student, logits_from_teacher,
benign_label, temperature):
"""loss function of student model
loss_student = softmax_with_logits(student(X), y) * 0.2
+ softmax_with_logits((student(X) / T), softmax(teacher(X) / T)) * 0.8
Args:
logits_from_student: a tensor from output of student model, size = (batch_size, class_num)
logits_from_teacher: a tensor from output of teacher model, size = (batch_size, class_num)
benign_label: a tensor from dataset, one-hot encoded, size = (batch_size, class_num)
temperature: an int, hyperparameter that controls knowledge distillation
Returns:
loss_student: a float value indicates loss of student model
"""
soft_label_from_teacher = tf.nn.softmax(logits_from_teacher / temperature)
loss_student = tf.nn.softmax_cross_entropy_with_logits(
labels=soft_label_from_teacher,
logits=logits_from_student / temperature
) * 0.8
loss_student += tf.nn.softmax_cross_entropy_with_logits(
labels=benign_label,
logits=logits_from_student
) * 0.2
return tf.reduce_mean(loss_student)
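# Side note on the temperature used above (standalone illustration, independent
# of the models in this script): dividing logits by T > 1 before the softmax
# yields softer targets for distillation, e.g. for logits [10.0, 5.0, 1.0]:
#   tf.nn.softmax([10.0, 5.0, 1.0])                      # ~[0.99, 0.01, 0.00] (hard)
#   tf.nn.softmax([x / 8.0 for x in [10.0, 5.0, 1.0]])   # ~[0.54, 0.29, 0.17] (soft)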
@tf.function
def loss_backdoor_fn(model_teacher, model_student, model_backdoor,
X, target_label):
"""loss function of backdoor model
loss_backdoor = softmax_with_logits(teacher(backdoor(X)), target)
+ softmax_with_logits(student(backdoor(X)), target)
+ L2_norm(mask_matrix)
Args:
model_teacher: a keras model of teacher
model_student: a keras model of student
model_backdoor: a keras model of backdoor
X: a numpy array of data, size = (batch_size, H, W, C)
target_label: a numpy array of target label, one-hot encoded, size = (batch_size, class_num)
Returns:
loss_backdoor: a float value indicates loss of backdoor model
"""
backdoored_X = model_backdoor(X)
logits_from_teacher = model_teacher(backdoored_X)
logits_from_student = model_student(backdoored_X)
loss_backdoor = tf.nn.softmax_cross_entropy_with_logits(
labels=target_label,
logits=logits_from_teacher
)
loss_backdoor += tf.nn.softmax_cross_entropy_with_logits(
labels=target_label,
logits=logits_from_student
)
loss_backdoor += tf.nn.l2_loss(model_backdoor.get_mask())
return tf.reduce_mean(loss_backdoor)
def evaluate_model_acc(model_teacher, model_student, data_loader, batch_size=128):
"""evaluate accuracy of model
Args:
model_teacher(keras model): teacher model
model_student(keras model): student model
data_loader(MNISTLoader object): dataloader for MNIST
batch_size(int): batch size
Returns:
acc_teacher(numpy float): accuracy of teacher model
acc_student(numpy float): accuracy of student model
"""
accuracy_teacher = tf.keras.metrics.CategoricalAccuracy()
accuracy_student = tf.keras.metrics.CategoricalAccuracy()
data_generator = data_loader.get_batch(batch_size, training=False)
num_batches = data_loader.num_test_data // batch_size
for batch_index in range(num_batches):
X, y = next(data_generator)
y_pred_teacher = tf.nn.softmax(model_teacher(X))
accuracy_teacher.update_state(y, y_pred_teacher)
y_pred_student = tf.nn.softmax(model_student(X))
accuracy_student.update_state(y, y_pred_student)
acc_teacher = accuracy_teacher.result().numpy()
acc_student = accuracy_student.result().numpy()
logger = logging.getLogger(__name__)
logger.critical("Teacher Acc: %f" % acc_teacher)
logger.critical("Student Acc: %f" % acc_student)
return acc_teacher, acc_student
def evaluate_backdoor_l2(model_backdoor):
"""L2-norm of backdoor mask
Args:
model_backdoor(keras model): backdoor model
Returns:
l2_norm(numpy float): l2_norm of backdoor mask
"""
l2_norm = tf.norm(model_backdoor.get_mask(), ord='euclidean').numpy()
logger = logging.getLogger(__name__)
logger.critical("Backdoor l2 norm: %f" % l2_norm)
return l2_norm
def evaluate_attack_success(model_teacher, model_student, data_loader, batch_size=128):
"""evaluate attack success rate
Args:
model_teacher(keras model): teacher model
model_student(keras model): student model
data_loader(MNISTLoader object): dataloader for MNIST
batch_size(int): batch size
Returns:
succ_teacher(numpy float): attack success rate against teacher model
succ_student(numpy float): attack success rate against student model
"""
accuracy_teacher = tf.keras.metrics.CategoricalAccuracy()
accuracy_student = tf.keras.metrics.CategoricalAccuracy()
data_generator = data_loader.get_batch(batch_size, training=False)
num_batches = data_loader.num_test_data // batch_size
for batch_index in range(num_batches):
X, y = next(data_generator)
backdoored_X = backdoor(X)
y_target = tf.one_hot(target_label, depth=10).numpy()
y_target = np.tile(y_target, (batch_size, 1))
y_pred_teacher = model_teacher(backdoored_X)
accuracy_teacher.update_state(y_target, y_pred_teacher)
y_pred_student = model_student(backdoored_X)
accuracy_student.update_state(y_target, y_pred_student)
succ_teacher = accuracy_teacher.result().numpy()
succ_student = accuracy_student.result().numpy()
logger = logging.getLogger(__name__)
logger.critical("Attack success rate on Teacher: %f" % succ_teacher)
logger.critical("Attack success rate on Student: %f" % succ_student)
return succ_teacher, succ_student
# hyper parameters
num_epochs = 20
batch_size = 128
learning_rate_teacher = 1e-3
learning_rate_student = 1e-3
learning_rate_trigger = 1e-3
temperature = 8
target_label = 3
# load dataset
data_loader = MNISTLoader()
data_loader.preprocess()
data_generator = data_loader.get_batch(batch_size, training=True)
# build models
teacher = Teacher_model()
student = Student_model()
backdoor = Backdoor(target_label=target_label)
# optimizers
optimizer_teacher = tf.keras.optimizers.Adam(learning_rate=learning_rate_teacher)
optimizer_student = tf.keras.optimizers.Adam(learning_rate=learning_rate_student)
optimizer_backdoor = tf.keras.optimizers.Adam(learning_rate=learning_rate_trigger)
# customize training
for epoch_index in range(num_epochs):
logger.critical("epoch: %d" % (epoch_index + 1))
num_batches = data_loader.num_train_data // batch_size
for batch_index in tqdm(range(num_batches), ascii=True):
X, y = next(data_generator)
y_target = tf.one_hot(target_label, depth=10).numpy()
y_target = np.tile(y_target, (batch_size, 1))
# train teacher
with tf.GradientTape() as tape:
logits_from_benign = teacher(X)
backdoored_X = backdoor(X)
logits_from_backdoor = teacher(backdoored_X)
loss_teacher = loss_teacher_fn(
logits_from_benign,
logits_from_backdoor,
y, y_target, temperature
)
grads = tape.gradient(loss_teacher, teacher.trainable_weights)
optimizer_teacher.apply_gradients(
grads_and_vars=zip(grads, teacher.trainable_weights)
)
# train student
if batch_index % 2:
with tf.GradientTape() as tape:
logits_from_teacher = teacher(X)
logits_from_student = student(X)
loss_student = loss_student_fn(
logits_from_student,
logits_from_teacher,
y, temperature
)
grads = tape.gradient(loss_student, student.trainable_weights)
optimizer_student.apply_gradients(
grads_and_vars=zip(grads, student.trainable_weights)
)
# train backdoor
with tf.GradientTape() as tape:
loss_backdoor = loss_backdoor_fn(
teacher, student, backdoor,
X, y_target
)
grads = tape.gradient(loss_backdoor, backdoor.trainable_weights)
optimizer_backdoor.apply_gradients(
grads_and_vars=zip(grads, backdoor.trainable_weights)
)
evaluate_model_acc(teacher, student, data_loader, batch_size)
evaluate_backdoor_l2(backdoor)
evaluate_attack_success(teacher, student, data_loader, batch_size)
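# The model_dir created near the top of this script is never written to in the
# original training loop; a minimal sketch for persisting the trained weights
# (assumes Teacher_model and Student_model are tf.keras models, as the
# docstrings above state):
teacher.save_weights(os.path.join(model_dir, "teacher_ckpt"))
student.save_weights(os.path.join(model_dir, "student_ckpt"))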
|
the-stack_106_17707
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Table handling for DMF.
The main class defined here is :class:`Table`. It provides constructor methods
for reading from Excel and CSV files. There is a convention defined for
indicating units in column headers so that this code can split the unit from
the column name. Other methods are defined for adding and extracting tables
from DMF :class:`idaes.core.dmf.resource.Resource` objects.
In the simplest case, you would create a new DMF resource for a CSV table like this::
from idaes.core.dmf.resource import Resource
resource = Resource()
resource.add_table("my_file.csv")
# you can now save this resource in the DMF
Then you could retrieve and use that table like this::
# retrieve resource from the DMF
table = resource.tables["my_file.csv"]
dataframe = table.data # Pandas dataframe
units = table.units # Units extracted from header row (strings)
See also, on the DMF Resource class:
* :meth:`idaes.core.dmf.resource.Resource.add_table`
* :attr:`idaes.core.dmf.resource.Resource.tables`
"""
# stdlib
from typing import List, Tuple, Dict
import re
# ext
import pandas as pd
# Local
from idaes.core.dmf.resource import Resource
__authors__ = ["Dan Gunter (LBNL)"]
__author__ = __authors__[0]
class DataFormatError(Exception):
def __init__(self, source, problem):
message = f"in {source}: {problem}"
super().__init__(message)
class Table:
"""Represent a table stored in the DMF.
Tables are expected to have a header row with optional units, which if present
are encoded in [square brackets]. Whitespace is ignored between the column
name and the units. For example::
T [C], P [bar], G0/RT H2O, G0/RT NaCl [-], A phi [(kg/mol)^0.5]
0, 1, -23.4638, -13.836, 0.3767
"""
def __init__(self):
"""Create new, empty, table.
Use :meth:`read_csv` or :meth:`read_excel` to populate the table with data.
"""
self._data = pd.DataFrame({})
self._units = {}
self._filepath = None
self._desc = ""
@property
def data(self) -> pd.DataFrame:
"""Pandas dataframe for data."""
return self._data
@property
def units_dict(self) -> Dict[str, str]:
"""Units as a dict keyed by table column name."""
return self._units.copy()
@property
def units_list(self) -> List[str]:
"""Units in order of table columns."""
return [self._units[c] for c in self._data.columns]
#: Shorthand for getting list of units
units = units_list
@property
def description(self):
return self._desc
@description.setter
def description(self, value):
self._desc = value
@staticmethod
def read_table(filepath, inline: bool, file_format: str) -> "Table":
"""Determine the input file type, then construct a new Table object
by calling one of :meth:`Table.read_csv` or :meth:`Table.read_excel`.
Args:
filepath: Any valid first argument to pandas `read_csv`
inline: If True, read the whole table in; otherwise just get the
column names and units from the header row.
file_format: One of 'infer', 'csv', or 'excel'. For 'infer',
use the file extension (and only the extension) to
determine if it's a CSV or Excel file.
Returns:
Constructed Table object
Raises:
IOError: If the input cannot be read or parsed
"""
fmt = file_format.lower()
name = filepath.name
if fmt == "infer":
if name.endswith(".csv"):
fmt = "csv"
elif name.endswith(".xls") or name.endswith(".xlsx"):
fmt = "excel"
else:
raise ValueError(f"Cannot infer file format for '{name}'")
elif fmt not in ("csv", "excel"):
raise ValueError(f"Unknown file format '{fmt}'; must be csv or excel")
# create a new table to work with
table = Table()
# set up keywords to read only header row if we are not including data inline
kwargs = {}
if not inline:
kwargs["nrows"] = 0
# read the table (or at least its header)
try:
if fmt == "csv":
table.read_csv(filepath, **kwargs)
elif fmt == "excel":
table.read_excel(filepath, **kwargs)
except Exception as err:
raise IOError(f"Cannot read '{filepath}': {err}")
return table
def read_csv(self, filepath, **kwargs) -> None:
"""Read the table from a CSV file using pandas' `read_csv()`.
See `Pandas read_csv docs
<https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html>`_
for details.
Existing table will be replaced.
Args:
filepath: Any valid first argument to pandas `read_csv`
kwargs: Keyword arguments passed to pandas `read_csv`
Returns:
None
"""
self._data = pd.read_csv(filepath, **kwargs)
self._extract_units()
self._filepath = filepath
def read_excel(self, filepath, **kwargs) -> None:
"""Read the table from a CSV file using pandas' `read_excel()`.
See `Pandas read_excel docs
<https://pandas.pydata.org/docs/reference/api/pandas.read_excel.html>`_
for details.
Existing table will be replaced.
Args:
filepath: Any valid first argument to pandas `read_excel`
**kwargs: Keyword arguments passed to pandas `read_excel`
Returns:
None
Raises:
ValueError: if more than one Excel sheet is returned
DataFormatError: if the input data or header is invalid
"""
# Workaround for older versions of Python/Pandas (python 3.6):
# set engine explicitly to openpyxl for *.xlsx files
v = [int(_) for _ in pd.__version__.split(".")]
if v[0] <= 1 and v[1] <= 1: # version < 1.2.0
from io import BufferedIOBase, RawIOBase
import os
# if it's a file and has xlsx extension, set engine
if not isinstance(filepath, (BufferedIOBase, RawIOBase)):
ext = os.path.splitext(str(filepath))[-1]
if ext == ".xlsx":
kwargs["engine"] = "openpyxl"
data = pd.read_excel(filepath, **kwargs)
if isinstance(data, dict):
raise ValueError(
f"Read from excel file must return a single sheet, "
f"but sheet_name='{kwargs.get('sheet_name', '?')}' "
f"returned {len(data)} sheets: {list(data.keys())}"
)
self._data = data
self._extract_units()
self._filepath = filepath
def _extract_units(self):
new_names, units_dict = {}, {}
for name in self._data.columns:
base_name, units = self._split_units(name)
new_names[name] = base_name
units_dict[base_name] = units
self._data.rename(columns=new_names, inplace=True)
self._units = units_dict
#: Regular expression for extracting units from column names.
#: In plain English, the following forms are expected for a
#: column name: "Name", "Name[Units]", "Longer Name With $% Chars [ Units ]"
#: For both the Name and the Units, any sequence of characters valid
#: in the current encoding are acceptable (except, of course, a "["
#: in the name, which means start-of-units)
UNITS_REGEX = r"""
(?P<name>[^[]+) # column name
(?:\s*\[ # start of [units] section
(?P<units>.*?) # column units
\])? # end of [units] section, which is optional
"""
@classmethod
def _split_units(cls, name) -> Tuple[str, str]:
m = re.match(cls.UNITS_REGEX, name, flags=re.X)
if m is None:
raise DataFormatError(
name,
"No recognized column name. Expected syntax is "
"'name' or 'name [units]'",
)
new_name = m.group("name").strip()
unit = m.group("units")
if unit == "-" or unit is None:
unit = "" # normalize empty units to empty string
else:
unit = unit.strip()  # note: may also end up empty
return new_name, unit
def add_to_resource(self, rsrc: Resource):
"""Add the current table, inline, to the given resource.
Args:
rsrc: A DMF :class:`Resource` instance
Returns:
None
"""
rsrc.data[Resource.TABLE_FIELD] = self.as_dict()
@classmethod
def from_resource(cls, rsrc: Resource) -> Dict[str, "Table"]:
"""Get an instance of this class from data in the given resource.
Args:
rsrc: A DMF :class:`Resource` instance
Returns:
Dictionary of tables in resource. If there is only one inline
table, the dictionary is of length one with only key "" (empty string).
If there are multiple tables referenced by file the dictionary
keys are the (relative) file names.
If there are no tables in this resource, raises KeyError.
Raises:
KeyError: if there are no tables in this resource
"""
data = rsrc.v["data"]
if Resource.TABLE_FIELD in data:
# Single inline resource
table_ = cls.from_dict(data[Resource.TABLE_FIELD])
return {"": table_}
elif Resource.TABLE_INFO_FIELD in data:
# One or more files
tables = {}
for idx, path in enumerate(rsrc.get_datafiles()):
table_ = cls.read_table(path, True, "infer")
table_.description = rsrc.v[Resource.DATAFILES_FIELD][idx].get(
"desc", ""
)
tables[path.name] = table_
return tables
else:
raise KeyError("No table in resource")
def as_dict(self, values=True) -> Dict:
"""Get the representation of this table as a dict.
Args:
values: If True, include the values in the dict. Otherwise only
include the units for each column.
Returns:
Dictionary with the structure accepted by :meth:`from_dict`.
If the "values" argument is False, that key will be missing from
the dict for each column.
"""
header = list(self._data.columns)
d = {}
for column in header:
d[column] = {"units": self._units[column]}
if values:
d[column]["values"] = list(self._data[column])
return d
@classmethod
def from_dict(cls, data: Dict) -> "Table": # unquote in Py3.7+ see PEP563
"""Create a new Table object from a dictionary of data and units.
Args:
data: Dictionary with the following structure::
{
'column-name-1': {
'units': 'unit',
'values': [ value, value, .. ]
},
'column-name-2': {
'units': 'unit',
'values': [ value, value, .. ]
},
...etc...
}
Returns:
:class:`Table` object
"""
tbl = Table()
dataframe_dict = {}
for column, info in data.items():
dataframe_dict[column] = info.get("values", [])
tbl._units[column] = info.get("units", "")
tbl._data = pd.DataFrame(dataframe_dict)
return tbl
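# Usage sketch (assumes idaes is installed): build a Table from the dict
# structure documented in ``from_dict`` and read the data and units back.
if __name__ == "__main__":
    _tbl = Table.from_dict(
        {
            "T": {"units": "C", "values": [0, 25, 100]},
            "P": {"units": "bar", "values": [1, 1, 1]},
        }
    )
    print(_tbl.data)        # pandas DataFrame with columns "T" and "P"
    print(_tbl.units_list)  # ["C", "bar"]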
|
the-stack_106_17708
|
"""
This initialization scripts will create CUDA context and initialize UCX-Py,
depending on user parameters.
It is sometimes convenient to initialize the CUDA context, particularly before
starting up Dask workers which create a variety of threads.
To ensure UCX works correctly, it is important to ensure it is initialized with
the correct options. This is important for scheduler, workers and client. This
initialization script will ensure that based on the flags and options passed by
the user.
This module is intended to be used within a Worker preload script.
https://docs.dask.org/en/latest/setup/custom-startup.html
You can add it to your global config with the following yaml
distributed:
worker:
preload:
- dask_cuda.initialize_ucx
See https://docs.dask.org/en/latest/configuration.html for more information
about Dask configuration.
"""
import logging
import dask
import click
import numba.cuda
from .utils import get_ucx_config
logger = logging.getLogger(__name__)
def initialize(
create_cuda_context=True,
enable_tcp_over_ucx=False,
enable_infiniband=False,
enable_nvlink=False,
net_devices="",
cuda_device_index=None,
):
if create_cuda_context:
try:
numba.cuda.current_context()
except Exception:
logger.error("Unable to start CUDA Context", exc_info=True)
ucx_config = get_ucx_config(
enable_tcp_over_ucx=enable_tcp_over_ucx,
enable_infiniband=enable_infiniband,
enable_nvlink=enable_nvlink,
net_devices=net_devices,
cuda_device_index=cuda_device_index,
)
dask.config.update(dask.config.global_config, {"ucx": ucx_config}, priority="new")
@click.command()
@click.option(
"--create-cuda-context/--no-create-cuda-context",
default=False,
help="Create CUDA context",
)
@click.option(
"--enable-tcp-over-ucx/--disable-tcp-over-ucx",
default=False,
help="Enable TCP communication over UCX",
)
@click.option(
"--enable-infiniband/--disable-infiniband",
default=False,
help="Enable InfiniBand communication",
)
@click.option(
"--enable-nvlink/--disable-nvlink",
default=False,
help="Enable NVLink communication",
)
@click.option(
"--net-devices",
type=str,
default=None,
help="Network interface to establish UCX connection, "
"usually the Ethernet interface, like 'eth0' or 'enp1s0f0'",
)
def dask_setup(
service,
create_cuda_context,
enable_tcp_over_ucx,
enable_infiniband,
enable_nvlink,
net_devices,
):
if create_cuda_context:
try:
numba.cuda.current_context()
except Exception:
logger.error("Unable to start CUDA Context", exc_info=True)
|
the-stack_106_17710
|
from django.urls import path
from . import views
from django.views.generic.base import TemplateView
urlpatterns = [
path('add_annotation', views.add_annotation),
path('getChatGroupPapers', views.getChatGroupPapers),
path('getChatGroupMembers', views.getChatGroupMembers),
path('createChatGroup', views.createChatGroup),
path('uploadChatGroupPaper', views.uploadChatGroupPaper),
path('getBothStarList', views.getBothStarList),
path('getMyChatGroupList', views.getMyChatGroupList),
path('createChatGroup', views.createChatGroup),
path('chatGroupPaper.html', TemplateView.as_view(template_name = 'chatGroupPaper.html')),
path('showpdf.html', TemplateView.as_view(template_name = 'showpdf.html')),
path('memberInGroupPage.html', TemplateView.as_view(template_name = 'memberInGroupPage.html')),
path('singleGroupPage.html', TemplateView.as_view(template_name = 'singleGroupPage.html')),
path('uploadPaperToChatGroup.html', TemplateView.as_view(template_name = 'uploadPaperToChatGroup.html')),
path('getChatGroupName', views.getChatGroupName),
path('myChatGroupList.html', TemplateView.as_view(template_name = 'myChatGroupList.html')),
path('createChatGroup.html', TemplateView.as_view(template_name = 'createChatGroup.html')),
path('annotation-noicon.svg', views.get_icon),
]
|
the-stack_106_17711
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="hvvabfahrtsmonitor",
version="0.1.1",
author="Manuel Catu",
author_email="[email protected]",
description="Do requests to the hvv abfahrtsmonitor and get parsed data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mcantureinhard/hvvabfahrtsmonitor",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
the-stack_106_17713
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from enum import Enum
from typing import TYPE_CHECKING
from ._generated.models import ContentProperties
if TYPE_CHECKING:
from ._generated.models import ManifestAttributesBase
from ._generated.models import RepositoryProperties as GeneratedRepositoryProperties
from ._generated.models import TagProperties as GeneratedTagProperties
class ContentPermissions(object):
"""Permissions of an artifact or tag
:ivar bool can_delete: Ability to delete an artifact or tag
:ivar bool can_list: Ability to list an artifact or tag
:ivar bool can_read: Ability to read an artifact or tag
:ivar bool can_write: Ability to write an artifact or tag
"""
def __init__(self, **kwargs):
self.can_delete = kwargs.get("can_delete")
self.can_list = kwargs.get("can_list")
self.can_read = kwargs.get("can_read")
self.can_write = kwargs.get("can_write")
@classmethod
def _from_generated(cls, generated):
# type: (ContentProperties) -> ContentPermissions
return cls(
can_delete=generated.can_delete,
can_list=generated.can_list,
can_read=generated.can_read,
can_write=generated.can_write,
)
def _to_generated(self):
# type: () -> ContentProperties
return ContentProperties(
can_delete=self.can_delete,
can_list=self.can_list,
can_read=self.can_read,
can_write=self.can_write,
)
class DeletedRepositoryResult(object):
"""Represents the digests and tags deleted when a repository is deleted
:ivar List[str] deleted_registry_artifact_digests: Registry artifact digests that were deleted
:ivar List[str] deleted_tags: Tags that were deleted
"""
def __init__(self, **kwargs):
self.deleted_registry_artifact_digests = kwargs.get("deleted_registry_artifact_digests", None)
self.deleted_tags = kwargs.get("deleted_tags", None)
@classmethod
def _from_generated(cls, gen):
return cls(
deleted_tags=gen.deleted_tags,
deleted_registry_artifact_digests=gen.deleted_registry_artifact_digests,
)
class RegistryArtifactProperties(object):
"""Represents properties of a registry artifact
:ivar str cpu_architecture: CPU Architecture of an artifact
:ivar created_on: Time and date an artifact was created
:vartype created_on: :class:`~datetime.datetime`
:ivar str digest: Digest for the artifact
:ivar last_updated_on: Time and date an artifact was last updated
:vartype last_updated_on: :class:`~datetime.datetime`
:ivar str operating_system: Operating system for the artifact
:ivar List[str] references: References for the artifact
:ivar str size: Size of the artifact
:ivar List[str] tags: Tags associated with a registry artifact
:ivar content_permissions: Permissions for an artifact
:vartype content_permissions: :class:`~azure.containerregistry.ContentPermissions`
"""
def __init__(self, **kwargs):
self.cpu_architecture = kwargs.get("cpu_architecture", None)
self.created_on = kwargs.get("created_on", None)
self.digest = kwargs.get("digest", None)
self.last_updated_on = kwargs.get("last_updated_on", None)
self.operating_system = kwargs.get("operating_system", None)
self.references = kwargs.get("references", None)
self.size = kwargs.get("size", None)
self.tags = kwargs.get("tags", None)
self.content_permissions = kwargs.get("content_permissions", None)
if self.content_permissions:
self.content_permissions = ContentPermissions._from_generated(self.content_permissions)
@classmethod
def _from_generated(cls, generated):
# type: (ManifestAttributesBase) -> RegistryArtifactProperties
return cls(
cpu_architecture=generated.cpu_architecture,
created_on=generated.created_on,
digest=generated.digest,
last_updated_on=generated.last_updated_on,
operating_system=generated.operating_system,
size=generated.size,
tags=generated.tags,
content_permissions=generated.writeable_properties,
)
class RepositoryProperties(object):
"""Model for storing properties of a single repository
:ivar content_permissions: Read/Write/List/Delete permissions for the repository
:vartype content_permissions: :class:`~azure.containerregistry.ContentPermissions`
:ivar created_on: Time the repository was created
:vartype created_on: :class:`datetime.datetime`
:ivar last_updated_on: Time the repository was last updated
:vartype last_updated_on: :class:`datetime.datetime`
:ivar int manifest_count: Number of manifest in the repository
:ivar str name: Name of the repository
:ivar str registry: Registry the repository belongs to
:ivar int tag_count: Number of tags associated with the repository
"""
def __init__(self, **kwargs):
self.content_permissions = kwargs.get("content_permissions", None)
self.created_on = kwargs.get("created_on", None)
self.last_updated_on = kwargs.get("last_updated_on", None)
self.manifest_count = kwargs.get("manifest_count", None)
self.name = kwargs.get("name", None)
self.registry = kwargs.get("registry", None)
self.tag_count = kwargs.get("tag_count", None)
if self.content_permissions:
self.content_permissions = ContentPermissions._from_generated(self.content_permissions)
@classmethod
def _from_generated(cls, generated):
# type: (GeneratedRepositoryProperties) -> RepositoryProperties
return cls(
created_on=generated.created_on,
last_updated_on=generated.last_updated_on,
name=generated.name,
manifest_count=generated.registry_artifact_count,
tag_count=generated.tag_count,
content_permissions=generated.writeable_properties,
registry=generated.additional_properties.get("registry", None),
)
class RegistryArtifactOrderBy(str, Enum):
"""Enum for ordering registry artifacts"""
LAST_UPDATE_TIME_DESCENDING = "timedesc"
LAST_UPDATE_TIME_ASCENDING = "timeasc"
class TagOrderBy(str, Enum):
"""Enum for ordering tags"""
LAST_UPDATE_TIME_DESCENDING = "timedesc"
LAST_UPDATE_TIME_ASCENDING = "timeasc"
class TagProperties(object):
"""Model for storing properties of a single tag
:ivar content_permissions: Read/Write/List/Delete permissions for the tag
:vartype content_permissions: :class:`~azure.containerregistry.ContentPermissions`
:ivar created_on: Time the tag was created
:vartype created_on: :class:`datetime.datetime`
:ivar str digest: Digest for the tag
:ivar last_updated_on: Time the tag was last updated
:vartype last_updated_on: :class:`datetime.datetime`
:ivar str name: Name of the image the tag corresponds to
:ivar str registry: Registry the tag belongs to
"""
def __init__(self, **kwargs):
self.content_permissions = kwargs.get("writeable_properties", None)
self.created_on = kwargs.get("created_on", None)
self.digest = kwargs.get("digest", None)
self.last_updated_on = kwargs.get("last_updated_on", None)
self.name = kwargs.get("name", None)
if self.content_permissions:
self.content_permissions = ContentPermissions._from_generated(self.content_permissions)
@classmethod
def _from_generated(cls, generated):
# type: (GeneratedTagProperties) -> TagProperties
return cls(
created_on=generated.created_on,
digest=generated.digest,
last_updated_on=generated.last_updated_on,
name=generated.name,
writeable_properties=generated.writeable_properties,
)
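if __name__ == "__main__":
    # Minimal round-trip sketch (an addition, not part of the SDK module):
    # convert a ContentPermissions instance to the generated model and back.
    _perms = ContentPermissions(can_delete=False, can_list=True, can_read=True, can_write=False)
    _generated = _perms._to_generated()
    assert ContentPermissions._from_generated(_generated).can_read is True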
|
the-stack_106_17714
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, Nathan Davison <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: listen_ports_facts
author:
- Nathan Davison (@ndavison)
version_added: "2.9"
description:
- Gather facts on processes listening on TCP and UDP ports.
short_description: Gather facts on processes listening on TCP and UDP ports.
'''
EXAMPLES = r'''
- name: Gather facts on listening ports
listen_ports_facts:
- name: TCP whitelist violation
debug:
msg: TCP port {{ item.port }} by pid {{ item.pid }} violates the whitelist
vars:
tcp_listen_violations: "{{ ansible_facts.tcp_listen | rejectattr('port', 'in', tcp_whitelist) | list }}"
tcp_whitelist:
- 22
- 25
loop: "{{ tcp_listen_violations }}"
- name: List TCP ports
debug:
msg: "{{ ansible_facts.tcp_listen | map(attribute='port') | sort | list }}"
- name: List UDP ports
debug:
msg: "{{ ansible_facts.udp_listen | map(attribute='port') | sort | list }}"
- name: List all ports
debug:
msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}"
'''
RETURN = r'''
ansible_facts:
description: Dictionary containing details of TCP and UDP ports with listening servers
returned: always
type: complex
contains:
tcp_listen:
description: A list of processes that are listening on a TCP port.
returned: if TCP servers were found
type: list
contains:
address:
description: The address the server is listening on.
returned: always
type: str
sample: "0.0.0.0"
name:
description: The name of the listening process.
returned: if user permissions allow
type: str
sample: "mysqld"
pid:
description: The pid of the listening process.
returned: always
type: int
sample: 1223
port:
description: The port the server is listening on.
returned: always
type: int
sample: 3306
protocol:
description: The network protocol of the server.
returned: always
type: str
sample: "tcp"
stime:
description: The start time of the listening process.
returned: always
type: str
sample: "Thu Feb 2 13:29:45 2017"
user:
description: The user who is running the listening process.
returned: always
type: str
sample: "mysql"
udp_listen:
description: A list of processes that are listening on a UDP port.
returned: if UDP servers were found
type: list
contains:
address:
description: The address the server is listening on.
returned: always
type: str
sample: "0.0.0.0"
name:
description: The name of the listening process.
returned: if user permissions allow
type: str
sample: "rsyslogd"
pid:
description: The pid of the listening process.
returned: always
type: int
sample: 609
port:
description: The port the server is listening on.
returned: always
type: int
sample: 514
protocol:
description: The network protocol of the server.
returned: always
type: str
sample: "udp"
stime:
description: The start time of the listening process.
returned: always
type: str
sample: "Thu Feb 2 13:29:45 2017"
user:
description: The user who is running the listening process.
returned: always
type: str
sample: "root"
'''
import re
import platform
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
def netStatParse(raw):
results = list()
for line in raw.splitlines():
listening_search = re.search('[^ ]+:[0-9]+', line)
if listening_search:
splitted = line.split()
conns = re.search('([^ ]+):([0-9]+)', splitted[3])
pidstr = ''
if 'tcp' in splitted[0]:
protocol = 'tcp'
pidstr = splitted[6]
elif 'udp' in splitted[0]:
protocol = 'udp'
pidstr = splitted[5]
pids = re.search(r'(([0-9]+)/(.*)|-)', pidstr)
if conns and pids:
address = conns.group(1)
port = conns.group(2)
if (pids.group(2)):
pid = pids.group(2)
else:
pid = 0
if (pids.group(3)):
name = pids.group(3)
else:
name = ''
result = {
'pid': int(pid),
'address': address,
'port': int(port),
'protocol': protocol,
'name': name,
}
if result not in results:
results.append(result)
else:
raise EnvironmentError('Could not get process information for the listening ports.')
return results
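# Example of the parsing above (illustration only): a typical "netstat -plunt"
# line such as
#   "tcp   0   0 0.0.0.0:22   0.0.0.0:*   LISTEN   1223/sshd"
# is turned into
#   {'pid': 1223, 'address': '0.0.0.0', 'port': 22,
#    'protocol': 'tcp', 'name': 'sshd'}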
def main():
module = AnsibleModule(
argument_spec={},
supports_check_mode=True,
)
if platform.system() != 'Linux':
module.fail_json(msg='This module requires Linux.')
def getPidSTime(pid):
ps_cmd = module.get_bin_path('ps', True)
rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)])
stime = ''
if rc == 0:
for line in ps_output.splitlines():
if 'started' not in line:
stime = line
return stime
def getPidUser(pid):
ps_cmd = module.get_bin_path('ps', True)
rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)])
user = ''
if rc == 0:
for line in ps_output.splitlines():
if line != 'USER':
user = line
return user
result = {
'changed': False,
'ansible_facts': {
'tcp_listen': [],
'udp_listen': [],
},
}
try:
netstat_cmd = module.get_bin_path('netstat', True)
# which ports are listening for connections?
rc, stdout, stderr = module.run_command([netstat_cmd, '-plunt'])
if rc == 0:
netstatOut = netStatParse(stdout)
for p in netstatOut:
p['stime'] = getPidSTime(p['pid'])
p['user'] = getPidUser(p['pid'])
if p['protocol'] == 'tcp':
result['ansible_facts']['tcp_listen'].append(p)
elif p['protocol'] == 'udp':
result['ansible_facts']['udp_listen'].append(p)
except (KeyError, EnvironmentError) as e:
module.fail_json(msg=to_native(e))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
the-stack_106_17715
|
"""===========================
Pipeline template
===========================
.. Replace the documentation below with your own description of the
pipeline's purpose
Overview
========
This pipeline computes the word frequencies in the configuration
files :file:`pipeline.yml` and :file:`conf.py`.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.yml` file.
CGATReport report requires a :file:`conf.py` and optionally a
:file:`cgatreport.ini` file (see :ref:`PipelineReporting`).
Default configuration files can be generated by executing:
python <srcdir>/pipeline_@[email protected] config
Input files
-----------
None required except the pipeline configuration files.
Requirements
------------
The pipeline requires the results from
:doc:`pipeline_genesets`. Set the configuration variable
:py:data:`annotations_database` and :py:data:`annotations_dir`.
Pipeline output
===============
.. Describe output files of the pipeline here
Glossary
========
.. glossary::
Code
====
"""
from ruffus import transform, regex, suffix, follows
import sys
import os
from CGATCore import Pipeline as P
# load options from the config file
PARAMS = P.get_parameters(
["%s/pipeline.yml" % os.path.splitext(__file__)[0],
"../pipeline.yml",
"pipeline.yml"])
# ---------------------------------------------------
# Specific pipeline tasks
@transform(("pipeline.yml",),
regex("(.*)\.(.*)"),
r"\1.counts")
def countWords(infile, outfile):
'''count the number of words in the pipeline configuration files.'''
# the command line statement we want to execute
statement = '''awk 'BEGIN { printf("word\\tfreq\\n"); }
{for (i = 1; i <= NF; i++) freq[$i]++}
END { for (word in freq) printf "%%s\\t%%d\\n", word, freq[word] }'
< %(infile)s > %(outfile)s'''
# execute command in variable statement.
#
# The command will be sent to the cluster. The statement will be
# interpolated with any options that are defined in in the
# configuration files or variable that are declared in the calling
# function. For example, %(infile)s will we substituted with the
# contents of the variable "infile".
P.run(statement)
@transform(countWords,
suffix(".counts"),
"_counts.load")
def loadWordCounts(infile, outfile):
'''load results of word counting into database.'''
P.load(infile, outfile, "--add-index=word")
# ---------------------------------------------------
# Generic pipeline tasks
@follows(loadWordCounts)
def full():
pass
def main(argv=None):
if argv is None:
argv = sys.argv
P.main(argv)
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
|
the-stack_106_17716
|
"""Plot the history of the drag coefficient."""
from matplotlib import pyplot
import numpy
import pathlib
from scipy import signal
import petibmpy
from kinematics import Am, D, f, rho, Um, w
show_figure = True # if True, display the figure(s)
# Load drag force from file.
simudir = pathlib.Path(__file__).absolute().parents[1]
filepath = simudir / 'forces-0.txt'
t, fx, _ = petibmpy.read_forces(filepath)
# Convert drag to drag coefficient.
V = numpy.pi * D**2 / 4 # body volume
ax = w**2 * Am * numpy.sin(w * t)
fx += rho * V * ax
cd = fx / (0.5 * rho * Um**2 * D)
# Plot the history of the drag coefficient.
pyplot.rc('font', family='serif', size=16)
fig, ax = pyplot.subplots(figsize=(8.0, 4.0))
ax.grid()
ax.set_xlabel('$t / T$')
ax.set_ylabel('$C_D$')
ax.plot(f * t, cd)
ax.axis((0.0, 4.0, -6.0, 6.0))
fig.tight_layout()
# Save the figure.
figdir = simudir / 'figures'
figdir.mkdir(parents=True, exist_ok=True)
filepath = figdir / 'drag_coefficient.png'
fig.savefig(filepath, dpi=300)
if show_figure:
pyplot.show()
|
the-stack_106_17717
|
# -*- coding: utf-8 -*-
import time
from six import raise_from
from .backoff import Backoff
from .errors import ErrorWhitelist
from .strategies import ConstantBackoff
from .exceptions import MaxRetriesExceeded, RetryError, RetryTimeoutError
class Retrier(object):
"""
Implements a simple function retry mechanism with configurable backoff
strategy and task timeout limit handler.
Additionally, you can subscribe to `retry` attempts via `on_retry` param,
which accepts a binary function.
Retrier object also implements a context manager.
Arguments:
timeout (int): maximum optional timeout in milliseconds.
Use `0` for no limit. Defaults to `0`.
backoff (riprova.Backoff): optional backoff strategy to use.
Defaults to `riprova.ConstantBackoff`.
evaluator (function): optional evaluator function used to determine
when an operation should be retried or not.
This allows the developer to retry operations that did not raise
any exception, for instance. Evaluator function accepts 1
argument: the returned task result.
Evaluator function can raise an exception, return an error or
simply return `True` in order to retry the operation.
Otherwise the operation will be considered as valid and the
retry loop will end.
error_evaluator (function): optional evaluator function used to
determine when an exception raised by a task should be processed as a
legitimate error and therefore retried or, otherwise, treated as a
whitelisted error, stopping the retry loop and re-raising the
exception to the task consumer.
This provides high versatility to developers in order to compose
any exception, for instance. Evaluator is a unary
function that accepts 1 argument: the raised exception object.
Evaluator function can raise an exception, return an error or
simply return `True` in order to retry the operation.
Otherwise the operation will be considered as valid and the
retry loop will end.
        on_retry (function): optional function to call before every retry
            operation. `on_retry` function accepts 2 arguments: `err, next_try`
and should return nothing.
sleep_fn (function): optional function used to sleep.
Defaults `time.sleep()`.
Attributes:
        whitelist (riprova.ErrorWhitelist): default error whitelist instance
            used to evaluate whether a raised error should be retried.
        blacklist (riprova.ErrorBlacklist): default error blacklist instance
            used to evaluate whether a raised error should be retried.
Blacklist and Whitelist are mutually exclusive.
        timeout (int): stores the maximum retry attempts timeout in
seconds. Use `0` for no limit. Defaults to `0`.
attempts (int): number of retry attempts being executed from last
`run()` method call.
error (Exception): stores the latest generated error.
`None` if not error yet from last `run()` execution.
sleep (function): stores the function used to sleep.
Defaults to `time.sleep`.
backoff (Backoff): stores current used backoff.
Defaults to `riprova.ConstantBackoff`.
evaluator (function): stores the used evaluator function.
Defaults to `None`.
error_evaluator (function): stores the used error evaluator function.
Defaults to `self.is_whitelisted_error()`.
on_retry (function): stores the retry notifier function.
Defaults to `None`.
Raises:
AssertionError: in case of invalid input params.
Usage::
# Basic usage
retrier = riprova.Retrier(
timeout=10 * 1000,
backoff=riprova.FibonacciBackoff(retries=5))
def task(x):
return x * x
result = retrier.run(task, 4)
assert result == 16
assert retrier.attempts == 0
assert retrier.error == None
# Using the context manager
with riprova.Retrier() as retry:
retry.run(task, 'foo', bar=1)
"""
# Stores the default global error whitelist used for error retry evaluation
whitelist = None
# Blacklist is just a semantic alias to whitelist
blacklist = None
def __init__(self,
timeout=0,
backoff=None,
evaluator=None,
error_evaluator=None,
on_retry=None,
sleep_fn=None):
# Assert input params
if timeout is not None:
assert isinstance(timeout, (float, int)), 'timeout must be number'
assert timeout >= 0, 'timeout cannot be a negative number'
# Stores number of retry attempts
self.attempts = 0
# Stores latest error
self.error = None
        # Maximum optional timeout in milliseconds. Use 0 for no limit
self.timeout = timeout or 0
        # Stores optional function to call before every retry operation.
# `on_retry` function accepts 2 arguments: `err, next_try` and
# should return nothing.
self.on_retry = on_retry
# Stores optional evaluator function
self.evaluator = evaluator
# Stores the error evaluator function.
self.error_evaluator = error_evaluator or self.is_whitelisted_error
# Backoff strategy to use. Defaults to `riprova.ConstantBackoff`.
self.backoff = backoff or ConstantBackoff()
# Function used to sleep. Defaults `time.sleep()`.
self.sleep = sleep_fn or time.sleep
# Stores the default error whitelist used for error retry evaluation
self.whitelist = (Retrier.blacklist or
Retrier.whitelist or
ErrorWhitelist())
def is_whitelisted_error(self, err):
return self.whitelist.isretry(err)
def _call(self, fn, *args, **kw):
"""
Calls the given function with the given variadic arguments
"""
# Call original function with input arguments
res = fn(*args, **kw)
if not self.evaluator or res is None:
# Clean error on success
self.error = None
# Return response object
return res
# Use custom result evaluator in order to determine if the
# operation failed or not
err = self.evaluator(res)
if not err:
self.error = None
return res
# Raise custom error exception
if isinstance(err, Exception):
self.error = err
return raise_from(err, RetryError('retry loop error'))
# If True, raise a custom exception
if err is True:
err = RetryError('retry evaluator assertion returned True')
return raise_from(err, self.error)
# Otherwise simply return the error object
return err
def _timeout_error(self):
# Timeout error
timeout_err = RetryTimeoutError('max timeout exceeded while retrying '
'task: {}s'.format(self.timeout))
# Raise timeout error
raise_from(timeout_err, self.error)
def istimeout(self, start):
"""
        Verifies whether the current timeout has been exceeded.
Arguments:
            start (float): start UNIX time in seconds, as returned by time.time().
Returns:
bool: `True` if timeout exceeded, otherwise `False`.
"""
if self.timeout is None or self.timeout == 0:
return False
return time.time() - start > self.timeout > 0
def _handle_error(self, err):
"""
        Handle execution error state and decide whether the error should be retried.
"""
# Update latest cached error
self.error = err
        # Defaults to True
retry = True
# Evaluate if error is legit or should be retried
if self.error_evaluator:
retry = self.error_evaluator(err)
        # If evaluator returns an error exception, just raise it
if retry and isinstance(retry, Exception):
raise_from(retry, self.error)
# If retry evaluator returns False, raise original error and
# stop the retry cycle
if retry is False:
raise err
def _notify_subscriber(self, delay):
# Notify retry subscriber, if needed
if self.on_retry:
self.on_retry(self.error, delay)
def _get_delay(self):
# Get delay before next retry
delay = self.backoff.next()
# If backoff is done, raise an exception
if delay == Backoff.STOP:
return raise_from(MaxRetriesExceeded('max retries exceeded'),
self.error)
return delay
def run(self, fn, *args, **kw):
"""
Runs the given function in a retry loop until the operation is
        completed successfully or the maximum retry attempts are reached.
Arguments:
fn (function): operation to retry.
*args (args): partial arguments to pass to the function.
*kw (kwargs): partial keyword arguments to pass to the function.
Raises:
Exception: any potential exception raised by the function.
RetryTimeoutError: in case of a timeout exceed.
RuntimeError: if evaluator function returns `True`.
Returns:
mixed: value returned by the original function.
"""
# Reset state
self.error = None
self.attempts = 0
# Reset backoff strategy on every new run. Backoff are supposed to be
# used in single thread environment.
self.backoff.reset()
# Task initialization time for timeout tracking
start = time.time()
        # Run the operation in an infinite loop until the task succeeds
        # or the max retry attempts are reached.
while True:
            # Ensure we have not exceeded the max timeout
if self.istimeout(start):
return self._timeout_error()
try:
# Try running the potential failed operation
return self._call(fn, *args, **kw)
except Exception as err:
                # Handle the error accordingly and re-raise whitelisted ones
self._handle_error(err)
# Get delay before next try based on the configured backoff
delay = self._get_delay()
# Notify retry event subscriber, if needed
self._notify_subscriber(delay)
# Increment retry attempts
self.attempts += 1
# Sleep before next try
self.sleep(delay)
def __enter__(self):
# Reset state
self.error = None
self.attempts = 0
return self
def __exit__(self, exc_type, exc_value, traceback):
# Forward error, if needed
if exc_type:
raise exc_type
|
the-stack_106_17718
|
from datetime import datetime
import os
import sys
import discord
from discord.ext import commands
import psutil
from x86 import helpers
class About:
"""Commands that display information about bot, guild, user, etc"""
@commands.command(aliases=["botinfo","about"])
@commands.cooldown(6,12)
async def info(self, ctx):
"""Display bot info"""
app_info = await ctx.bot.application_info()
process = psutil.Process(os.getpid())
description = '<:info:483340220859678763> [**Bot Info**](https://bot.x86.fun/)\n'
description += f"`{ctx.bot.user.name} is a simple and modular Discord bot written in Python.`\n\u200b\n"
description += '<:server:483088255403687948> **Server**\n'
description += 'Click [**here**](https://discord.gg/rzYybFd)\n\u200b\n'
#this is lazy implementation, will change it later
description += '<:version:483351547489681409> **Version**\n '
description += '`0.5.0 alpha`\n\u200b\n'
description += '<:cpu:483063252331528192> **CPU Usage**\n'
description += f'`{psutil.cpu_percent()} %`\n\u200b\n'
description += '<:RAM:483083485171548162> **RAM Usage**\n'
description += f'`{psutil.virtual_memory()[2]} %`\n\u200b\n'
description += '<:process:483340180405616660> **Process Memory**\n'
description += f'`{round(process.memory_info()[0]/1048576.0, 2)} MB`\n\u200b\n'
description += '<:python:483063299416784931> **Python**\n'
description += f'`{sys.version_info[0]}.{sys.version_info[1]}.{sys.version_info[2]}`\n\u200b\n'
description += '<:discord:483063472767238164> **discord.py**\n'
description += f'`{discord.__version__}`\n\u200b\n'
description += '<:owner:483088166711066644> **Owner**\n'
description += f'<@{app_info.owner.id}>\n\u200b\n'
description += '<:ping:483063277656735744> **Ping**\n'
description += f'`{round(ctx.bot.latency*1000, 2)} ms`\n\u200b\n'
description += '<:uptime:483098847581569034> **Uptime**\n'
        description += f'`{(datetime.now() - ctx.bot.startTime).days} days {(datetime.now() - ctx.bot.startTime).seconds//3600} hours {((datetime.now() - ctx.bot.startTime).seconds//60)%60} minutes and {divmod((datetime.now() - ctx.bot.startTime).seconds, 60)[1]} seconds`\n\u200b\n'
description += '<:guild:483063322460160000> **Guilds**\n'
description += f'`{len(ctx.bot.guilds)}`\n\u200b\n'
description += '<:user:483063436029198336> **Users**\n'
description += f'`{sum(not member.bot for member in ctx.bot.get_all_members())}`\n\u200b\n'
description += '<a:bot:483065552559144990> **Commands**\n'
description += f'`{len(ctx.bot.commands)}`\n\u200b\n'
description += '<:shard:483063413635809311> **Shards**\n'
description += f'`{ctx.bot.shard_count}`\n\u200b\n'
description += '<:notice:483340299578507264> **Announcements**\n'
description += f"```tex\n$ {ctx.bot.announcements}\n```"
embed = discord.Embed(color=await helpers.get_color())
embed.description = description
embed.set_thumbnail(url=ctx.bot.user.avatar_url_as(format='png', size=256))
embed.set_footer(text=f'Requested by {ctx.author.name}#{ctx.author.discriminator}', icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@commands.command(brief="Display guild (server) info.",
aliases=["guild", "ginfo", "server", "serverinfo", "sinfo"])
@commands.guild_only()
@commands.cooldown(6, 12)
async def guildinfo(self, ctx):
"""Display information about the current guild, such as owner, region, emojis, and roles."""
guild = ctx.guild
embed = discord.Embed(title=guild.name, color=await helpers.get_color())
embed.description = guild.id
embed.set_thumbnail(url=guild.icon_url)
embed.add_field(name="Owner", value=str(guild.owner))
embed.add_field(name="Members", value=len(ctx.guild.members))
embed.add_field(name="Text channels", value=len(guild.text_channels))
embed.add_field(name="Voice channels", value=len(guild.voice_channels))
embed.add_field(name="Custom emojis", value=len(guild.emojis) or None)
embed.add_field(name="Custom roles", value=len(guild.roles)-1 or None)
embed.add_field(name="Region", value=str(guild.region))
embed.add_field(name="Created at", value=guild.created_at.ctime())
await ctx.send(embed=embed)
@commands.command(brief="Display channel info.", aliases=["channel", "cinfo"])
@commands.guild_only()
@commands.cooldown(6, 12)
async def channelinfo(self, ctx, *, channel: discord.TextChannel=None):
"""Display information about a text channel.
Defaults to the current channel.
* channel - Optional argument. A specific channel to get information about."""
# If channel is None, then it is set to ctx.channel.
channel = channel or ctx.channel
embed = discord.Embed(title=f"{channel.name}", color=await helpers.get_color())
try:
embed.description = channel.topic
except AttributeError:
pass
embed.add_field(name="Channel ID", value=channel.id)
try:
embed.add_field(name="Guild", value=channel.guild.name)
except AttributeError:
pass
embed.add_field(name="Members", value=len(channel.members))
embed.add_field(name="Created at", value=channel.created_at.ctime())
if channel.is_nsfw():
embed.set_footer(text="NSFW content is allowed for this channel.")
await ctx.send(embed=embed)
@commands.command(brief="Display voice channel info.",
aliases=["voicechannel", "vchannel", "vcinfo"])
@commands.guild_only()
@commands.cooldown(6, 12)
async def vchannelinfo(self, ctx, *, channel: discord.VoiceChannel):
"""Display information about a voice channel.
* channel - A specific voice channel to get information about."""
embed = discord.Embed(title=f"{channel.name}", color=await helpers.get_color())
embed.add_field(name="Channel ID", value=channel.id)
try:
embed.add_field(name="Guild", value=channel.guild.name)
except AttributeError:
pass
embed.add_field(name="Bitrate", value=f"{channel.bitrate}bps")
if channel.user_limit > 0:
user_limit = channel.user_limit
else:
user_limit = None
embed.add_field(name="User limit", value=user_limit)
embed.add_field(name="Created at", value=channel.created_at.ctime())
await ctx.send(embed=embed)
@commands.command(brief="Display user info.", aliases=["user", "uinfo"])
@commands.guild_only()
@commands.cooldown(6, 12)
async def userinfo(self, ctx, *, user: str = None):
"""Display information about a user, such as status and roles.
Defaults to the user who invoked the command.
* user - Optional argument. A user in the current channel to get user information about."""
if not user:
user = ctx.author
else:
user = await helpers.search_user(ctx, user)
embed = discord.Embed(title=f"{str(user)}")
embed.colour = user.color
embed.description = str(user.id)
if user.activity:
embed.description += f" | Playing **{user.activity}**"
embed.set_thumbnail(url=user.avatar_url_as(format="png", size=128))
embed.add_field(name="Nickname", value=user.nick)
embed.add_field(name="Bot user?", value="Yes" if user.bot else "No")
# This is a bit awkward. Basically we don't want the bot to just say Dnd.
if user.status.name == "dnd":
status = "Do Not Disturb"
else:
status = user.status.name.capitalize()
embed.add_field(name="Status", value=status)
embed.add_field(name="Color", value=str(user.color))
embed.add_field(name="Joined guild at", value=user.joined_at.ctime())
embed.add_field(name="Joined Discord at", value=user.created_at.ctime())
        # Build a comma-separated list of the user's non-default roles,
        # truncated to Discord's 1024-character embed field limit.
roles = ", ".join((role.name for role in user.roles if not role.is_default()))[:1024]
if roles:
embed.add_field(name="Roles", value=roles, inline=False)
await ctx.send(embed=embed)
def setup(Bot):
Bot.add_cog(About())
|
the-stack_106_17722
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test node handling
#
from test_framework.test_framework import BitcoinOilTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class NodeHandlingTest (BitcoinOilTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = True
def run_test(self):
###########################
# setban/listbanned tests #
###########################
assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point
self.nodes[2].setban("127.0.0.1", "add")
        time.sleep(3) #wait till the nodes are disconnected
assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].setban("127.0.0.0/24", "add")
assert_equal(len(self.nodes[2].listbanned()), 1)
try:
self.nodes[2].setban("127.0.0.1", "add") #throws exception because 127.0.0.1 is within range 127.0.0.0/24
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
try:
self.nodes[2].setban("127.0.0.1", "remove")
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
##test persisted banlist
self.nodes[2].setban("127.0.0.0/32", "add")
self.nodes[2].setban("127.0.0.0/24", "add")
self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 seconds
self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds
listBeforeShutdown = self.nodes[2].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) #must be here
time.sleep(2) #make 100% sure we expired 192.168.0.1 node time
#stop node
stop_node(self.nodes[2], 2)
self.nodes[2] = start_node(2, self.options.tmpdir)
listAfterShutdown = self.nodes[2].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
###########################
# RPC disconnectnode test #
###########################
url = urllib.parse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
time.sleep(2) #disconnecting a node needs a little bit of time
for node in self.nodes[0].getpeerinfo():
assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))
connect_nodes_bi(self.nodes,0,1) #reconnect the node
found = False
for node in self.nodes[0].getpeerinfo():
if node['addr'] == url.hostname+":"+str(p2p_port(1)):
found = True
assert(found)
if __name__ == '__main__':
NodeHandlingTest ().main ()
|
the-stack_106_17724
|
import random
from collections import OrderedDict
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import DistSamplerSeedHook, Runner
from mmdet.core import (DistEvalHook, DistOptimizerHook, Fp16OptimizerHook,
build_optimizer)
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.utils import get_root_logger
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def parse_losses(losses):
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
'{} is not a tensor or list of tensors'.format(loss_name))
loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
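# Illustrative sketch (hypothetical values, single-process case): given
#   losses = {'loss_cls': torch.tensor(0.5),
#             'loss_bbox': [torch.tensor(0.2), torch.tensor(0.1)]}
# parse_losses(losses) returns loss as a 0-dim tensor equal to 0.8 and
# log_vars == {'loss_cls': 0.5, 'loss_bbox': 0.3, 'loss': 0.8}, since every
# entry whose key contains 'loss' (here 0.5 and 0.2 + 0.1) is summed into
# the total.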
def batch_processor(model, data, train_mode):
"""Process a data batch.
This method is required as an argument of Runner, which defines how to
process a data batch and obtain proper outputs. The first 3 arguments of
batch_processor are fixed.
Args:
model (nn.Module): A PyTorch model.
data (dict): The data batch in a dict.
train_mode (bool): Training mode or not. It may be useless for some
models.
Returns:
dict: A dict containing losses and log vars.
"""
losses = model(**data)
loss, log_vars = parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['img'].data))
return outputs
def train_detector(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
logger = get_root_logger(cfg.log_level)
# start training
if distributed:
_dist_train(
model,
dataset,
cfg,
validate=validate,
logger=logger,
timestamp=timestamp,
meta=meta)
else:
_non_dist_train(
model,
dataset,
cfg,
validate=validate,
logger=logger,
timestamp=timestamp,
meta=meta)
def _dist_train(model,
dataset,
cfg,
validate=False,
logger=None,
timestamp=None,
meta=None):
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [
build_dataloader(
ds,
cfg.data.imgs_per_gpu,
cfg.data.workers_per_gpu,
dist=True,
seed=cfg.seed) for ds in dataset
]
# put model on gpus
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
runner = Runner(
model,
batch_processor,
optimizer,
cfg.work_dir,
logger=logger,
meta=meta)
    # an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# fp16 setting
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config,
**fp16_cfg)
else:
optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
# register hooks
runner.register_training_hooks(cfg.lr_config, optimizer_config,
cfg.checkpoint_config, cfg.log_config)
runner.register_hook(DistSamplerSeedHook())
# register eval hooks
if validate:
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
imgs_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=True,
shuffle=False)
eval_cfg = cfg.get('evaluation', {})
runner.register_hook(DistEvalHook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def _non_dist_train(model,
dataset,
cfg,
validate=False,
logger=None,
timestamp=None,
meta=None):
if validate:
raise NotImplementedError('Built-in validation is not implemented '
'yet in not-distributed training. Use '
'distributed training or test.py and '
'*eval.py scripts instead.')
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [
build_dataloader(
ds,
cfg.data.imgs_per_gpu,
cfg.data.workers_per_gpu,
cfg.gpus,
dist=False,
seed=cfg.seed) for ds in dataset
]
#print("im data loader ^^^^^^^*********$$$$$$, ",data_loaders)
#print("data len is ", len(data_loaders[0]))
#for i,d in enumerate(data_loaders[0]):
# if i > 20:
# print("ok break$$$$$$$$$")
# break
# print(d)
# print("current i " , i)
#
#print("OK ,fine")
# put model on gpus
model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
runner = Runner(
model,
batch_processor,
optimizer,
cfg.work_dir,
logger=logger,
meta=meta)
    # an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# fp16 setting
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=False)
else:
optimizer_config = cfg.optimizer_config
runner.register_training_hooks(cfg.lr_config, optimizer_config,
cfg.checkpoint_config, cfg.log_config)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
|
the-stack_106_17725
|
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Search through a directory tree and modify ARMI settings in existing input
file(s). All valid settings may be used as keyword arguments.
"""
from armi import operators
from armi import runLog
from armi import settings
from armi.cli.entryPoint import EntryPoint
class ModifyCaseSettingsCommand(EntryPoint):
"""
Search through a directory tree and modify ARMI settings in existing input file(s).
All valid settings may be used as keyword arguments.
Example
-------
$ python -m armi modify --numProcessors=3 *.xml
"""
name = "modify"
def addOptions(self):
self.parser.add_argument(
"--list-setting-files",
"-l",
action="store_true",
help=(
"Just list the settings files found and the proposed changes to make. "
"Don't actually modify them."
),
)
self.parser.add_argument(
"--skip-inspection",
"-I",
action="store_true",
default=False,
help="Skip inspection. By default, setting files are checked for integrity and consistency. These "
"checks result in needing to manually resolve a number of differences. Using this option will "
"suppress the inspection step.",
)
self.parser.add_argument(
"patterns",
type=str,
nargs="*",
default=["*.yaml"],
            help="Pattern(s) to use to match file names (e.g. *.xml)",
)
for settingName in self.cs.keys():
# verbosity and branchVerbosity already have command line options in the default parser
# adding them again would result in an error from argparse.
if settingName not in ["verbosity", "branchVerbosity"]:
                # can't modify case title; just use clone
self.createOptionFromSetting(settingName, suppressHelp=True)
def invoke(self):
csInstances = settings.recursivelyLoadSettingsFiles(".", self.args.patterns)
messages = (
("found", "listing")
if self.args.list_setting_files
else ("writing", "modifying")
)
for cs in csInstances:
runLog.important("{} settings file {}".format(messages[0], cs.path))
for settingName in self.settingsProvidedOnCommandLine:
if cs[settingName] != self.cs[settingName]:
runLog.info(
" changing `{}` from : {}\n"
" {} to -> {}".format(
settingName,
cs[settingName],
" " * (2 + len(settingName)),
self.cs[settingName],
)
)
cs[settingName] = self.cs[settingName]
# if we are only listing setting files, don't write them; it is OK that we modified them in memory :-)
if not self.args.skip_inspection:
inspector = operators.getOperatorClassFromSettings(cs).inspector(cs)
inspector.run()
if not self.args.list_setting_files:
cs.writeToYamlFile(cs.path)
runLog.important(
"Finished {} {} settings files.".format(messages[1], len(csInstances))
)
|
the-stack_106_17727
|
def comp_axes(
self,
axes_list,
machine=None,
axes_dict_in=None,
is_periodicity_a=None,
is_periodicity_t=None,
per_a=None,
is_antiper_a=None,
per_t=None,
is_antiper_t=None,
):
"""Compute simulation axes such as time / angle / phase axes, with or without periodicities
and including normalizations
Parameters
----------
self : Input
an Input object
machine : Machine
a Machine object
axes_list: list
List of axes name to return in axes dict
    axes_dict_in: {Data}
dict of axes containing time and angle axes (with or without (anti-)periodicity)
is_periodicity_a: bool
True if spatial periodicity is requested
is_periodicity_t: bool
True if time periodicity is requested
per_a : int
angle periodicity
is_antiper_a : bool
if the angle axis is antiperiodic
per_t : int
time periodicity
is_antiper_t : bool
if the time axis is antiperiodic
Returns
-------
axes_dict: {Data}
dict of axes containing requested axes
"""
if len(axes_list) == 0:
raise Exception("axes_list should not be empty")
if self.parent is not None:
simu = self.parent
else:
simu = None
if hasattr(simu, "parent") and simu.parent is not None:
output = simu.parent
else:
output = None
if (axes_list is None or len(axes_list) == 0) and (
axes_dict_in is None or len(axes_dict_in) == 0
):
raise Exception(
"Cannot calculate axes if both axes list and axes dict are None"
)
if machine is None:
# Fetch machine from input
if hasattr(simu, "machine") and simu.machine is not None:
machine = simu.machine
else:
raise Exception("Cannot calculate axes if simu.machine is None")
# Get machine pole pair number
p = machine.get_pole_pair_number()
# Fill periodicity parameters that are None
if per_a is None or is_antiper_a is None or per_t is None or is_antiper_t is None:
if output is not None:
# Get time and space (anti-)periodicities from the output
(
per_a_0,
is_antiper_a_0,
per_t_0,
is_antiper_t_0,
) = output.get_machine_periodicity()
else:
# Compute time and space (anti-)periodicities from the machine
per_a_0, is_antiper_a_0 = machine.comp_periodicity_spatial()
per_t_0, is_antiper_t_0, _, _ = machine.comp_periodicity_time()
if is_periodicity_t is None or is_periodicity_t:
# Enforce None values to machine time periodicity
per_t = per_t_0 if per_t is None else per_t
is_antiper_t = is_antiper_t_0 if is_antiper_t is None else is_antiper_t
if is_periodicity_t is None:
# Check time periodicity is included
is_periodicity_t = per_t > 1 or is_antiper_t
elif not is_periodicity_t:
# Remove time periodicity
per_t = 1
is_antiper_t = False
if is_periodicity_a is None or is_periodicity_a:
# Enforce None values to machine periodicity
per_a = per_a_0 if per_a is None else per_a
is_antiper_a = is_antiper_a_0 if is_antiper_a is None else is_antiper_a
if is_periodicity_a is None:
# Enforce requested angle periodicity
is_periodicity_a = per_a > 1 or is_antiper_a
elif not is_periodicity_a:
# Remove angle periodicity
per_a = 1
is_antiper_a = False
# Init axes_dict
axes_dict = dict()
# Get time axis
if "time" in axes_list:
# Check if Time is already in input dict of axes
if axes_dict_in is not None and "time" in axes_dict_in:
Time_in = axes_dict_in["time"]
else:
Time_in = None
# Calculate time axis
Time = self.comp_axis_time(p, per_t, is_antiper_t, Time_in)
# Store time axis in dict
axes_dict["time"] = Time
# Get angle axis
if "angle" in axes_list:
# Airgap radius
Rag = machine.comp_Rgap_mec()
# Check if Angle is already in input dict of axes
if axes_dict_in is not None and "angle" in axes_dict_in:
Angle_in = axes_dict_in["angle"]
else:
Angle_in = None
# Calculate angle axis
Angle = self.comp_axis_angle(p, Rag, per_a, is_antiper_a, Angle_in)
# Store angle axis in dict
axes_dict["angle"] = Angle
if "phase_S" in axes_list:
# Check if Phase is already in input dict of axes
stator_label = "phase_" + machine.stator.get_label()
if axes_dict_in is not None and stator_label in axes_dict_in:
Phase_in = axes_dict_in[stator_label]
else:
Phase_in = None
# Calculate stator phase axis
Phase = self.comp_axis_phase(machine.stator, Phase_in)
if Phase is not None:
# Store phase axis in dict
axes_dict[stator_label] = Phase
if "phase_R" in axes_list:
# Check if Phase is already in input dict of axes
rotor_label = "phase_" + machine.rotor.get_label()
if axes_dict_in is not None and rotor_label in axes_dict_in:
Phase_in = axes_dict_in[rotor_label]
else:
Phase_in = None
# Calculate rotor phase axis
Phase = self.comp_axis_phase(machine.rotor, per_a, is_antiper_a, Phase_in)
if Phase is not None:
# Store phase axis in dict
axes_dict[rotor_label] = Phase
return axes_dict
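# Illustrative call (sketch; assumes `self` is an Input attached to a
# Simulation whose machine is defined, as required by the checks above):
#   axes_dict = self.comp_axes(["time", "angle"],
#                              is_periodicity_a=False, is_periodicity_t=False)
# returns a dict with "time" and "angle" Data axes built without periodicity.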
|
the-stack_106_17729
|
"""This is a somewhat delicate package. It contains all registered components
and preconfigured templates.
Hence, it imports all of the components. To avoid cycles, no component should
import this in module scope."""
import logging
import warnings
import typing
from typing import Any, Dict, List, Optional, Text, Type
from rasa.nlu.classifiers.embedding_intent_classifier import EmbeddingIntentClassifier
from rasa.nlu.classifiers.keyword_intent_classifier import KeywordIntentClassifier
from rasa.nlu.classifiers.mitie_intent_classifier import MitieIntentClassifier
from rasa.nlu.classifiers.sklearn_intent_classifier import SklearnIntentClassifier
from rasa.nlu.selectors.embedding_response_selector import ResponseSelector
from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor
from rasa.nlu.extractors.duckling_http_extractor import DucklingHTTPExtractor
from rasa.nlu.extractors.entity_synonyms import EntitySynonymMapper
from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor
from rasa.nlu.featurizers.sparse_featurizer.count_vectors_featurizer import (
CountVectorsFeaturizer,
)
from rasa.nlu.featurizers.dense_featurizer.mitie_featurizer import MitieFeaturizer
from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer
from rasa.nlu.featurizers.dense_featurizer.spacy_featurizer import SpacyFeaturizer
from rasa.nlu.featurizers.dense_featurizer.convert_featurizer import ConveRTFeaturizer
from rasa.nlu.model import Metadata
from rasa.nlu.tokenizers.jieba_tokenizer import JiebaTokenizer
from rasa.nlu.tokenizers.mitie_tokenizer import MitieTokenizer
from rasa.nlu.tokenizers.convert_tokenizer import ConveRTTokenizer
from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.utils.mitie_utils import MitieNLP
from rasa.nlu.utils.spacy_utils import SpacyNLP
from rasa.utils.common import class_from_module_path
if typing.TYPE_CHECKING:
from rasa.nlu.components import Component
    from rasa.nlu.config import RasaNLUModelConfig
logger = logging.getLogger(__name__)
# Classes of all known components. If a new component should be added,
# its class name should be listed here.
component_classes = [
# utils
SpacyNLP,
MitieNLP,
# tokenizers
MitieTokenizer,
SpacyTokenizer,
WhitespaceTokenizer,
ConveRTTokenizer,
JiebaTokenizer,
# extractors
SpacyEntityExtractor,
MitieEntityExtractor,
CRFEntityExtractor,
DucklingHTTPExtractor,
EntitySynonymMapper,
# featurizers
SpacyFeaturizer,
MitieFeaturizer,
RegexFeaturizer,
CountVectorsFeaturizer,
ConveRTFeaturizer,
# classifiers
SklearnIntentClassifier,
MitieIntentClassifier,
KeywordIntentClassifier,
EmbeddingIntentClassifier,
# selectors
ResponseSelector,
]
# Mapping from a components name to its class to allow name based lookup.
registered_components = {c.name: c for c in component_classes}
# DEPRECATED ensures compatibility, will be removed in future versions
old_style_names = {
"nlp_spacy": "SpacyNLP",
"nlp_mitie": "MitieNLP",
"ner_spacy": "SpacyEntityExtractor",
"ner_mitie": "MitieEntityExtractor",
"ner_crf": "CRFEntityExtractor",
"ner_duckling_http": "DucklingHTTPExtractor",
"ner_synonyms": "EntitySynonymMapper",
"intent_featurizer_spacy": "SpacyFeaturizer",
"intent_featurizer_mitie": "MitieFeaturizer",
"intent_featurizer_ngrams": "NGramFeaturizer",
"intent_entity_featurizer_regex": "RegexFeaturizer",
"intent_featurizer_count_vectors": "CountVectorsFeaturizer",
"tokenizer_mitie": "MitieTokenizer",
"tokenizer_spacy": "SpacyTokenizer",
"tokenizer_whitespace": "WhitespaceTokenizer",
"tokenizer_jieba": "JiebaTokenizer",
"intent_classifier_sklearn": "SklearnIntentClassifier",
"intent_classifier_mitie": "MitieIntentClassifier",
"intent_classifier_keyword": "KeywordIntentClassifier",
"intent_classifier_tensorflow_embedding": "EmbeddingIntentClassifier",
}
# To simplify usage, there are a couple of model templates that already add
# necessary components in the right order. They also implement
# the preexisting `backends`.
registered_pipeline_templates = {
"pretrained_embeddings_spacy": [
{"name": "SpacyNLP"},
{"name": "SpacyTokenizer"},
{"name": "SpacyFeaturizer"},
{"name": "RegexFeaturizer"},
{"name": "CRFEntityExtractor"},
{"name": "EntitySynonymMapper"},
{"name": "SklearnIntentClassifier"},
],
"keyword": [{"name": "KeywordIntentClassifier"}],
"supervised_embeddings": [
{"name": "WhitespaceTokenizer"},
{"name": "RegexFeaturizer"},
{"name": "CRFEntityExtractor"},
{"name": "EntitySynonymMapper"},
{"name": "CountVectorsFeaturizer"},
{
"name": "CountVectorsFeaturizer",
"analyzer": "char_wb",
"min_ngram": 1,
"max_ngram": 4,
},
{"name": "EmbeddingIntentClassifier"},
],
"pretrained_embeddings_convert": [
{"name": "ConveRTTokenizer"},
{"name": "ConveRTFeaturizer"},
{"name": "EmbeddingIntentClassifier"},
],
}
def pipeline_template(s: Text) -> Optional[List[Dict[Text, Any]]]:
import copy
# do a deepcopy to avoid changing the template configurations
return copy.deepcopy(registered_pipeline_templates.get(s))
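# Illustrative lookup (sketch): pipeline_template("keyword") returns a fresh
# copy of [{"name": "KeywordIntentClassifier"}], while an unknown template
# name yields None (copy.deepcopy of None).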
def get_component_class(component_name: Text) -> Type["Component"]:
"""Resolve component name to a registered components class."""
if component_name not in registered_components:
if component_name not in old_style_names:
try:
return class_from_module_path(component_name)
except AttributeError:
# when component_name is a path to a class but the path does not contain
# that class
module_name, _, class_name = component_name.rpartition(".")
raise Exception(
f"Failed to find class '{class_name}' in module '{module_name}'.\n"
)
except ImportError as e:
# when component_name is a path to a class but that path is invalid or
# when component_name is a class name and not part of old_style_names
is_path = "." in component_name
if is_path:
module_name, _, _ = component_name.rpartition(".")
exception_message = f"Failed to find module '{module_name}'. \n{e}"
else:
exception_message = (
f"Cannot find class '{component_name}' from global namespace. "
f"Please check that there is no typo in the class "
f"name and that you have imported the class into the global "
f"namespace."
)
raise ModuleNotFoundError(exception_message)
else:
# DEPRECATED ensures compatibility, remove in future versions
warnings.warn(
"Your nlu config file "
f"contains old style component name `{component_name}`, "
f"you should change it to its class name: "
f"`{old_style_names[component_name]}`.",
FutureWarning,
)
component_name = old_style_names[component_name]
return registered_components[component_name]
def load_component_by_meta(
component_meta: Dict[Text, Any],
model_dir: Text,
metadata: Metadata,
cached_component: Optional["Component"],
**kwargs: Any,
) -> Optional["Component"]:
"""Resolves a component and calls its load method.
Inits it based on a previously persisted model.
"""
# try to get class name first, else create by name
component_name = component_meta.get("class", component_meta["name"])
component_class = get_component_class(component_name)
return component_class.load(
component_meta, model_dir, metadata, cached_component, **kwargs
)
def create_component_by_config(
    component_config: Dict[Text, Any], config: "RasaNLUModelConfig"
) -> Optional["Component"]:
    """Resolves a component and calls its create method.
Inits it based on a previously persisted model.
"""
# try to get class name first, else create by name
component_name = component_config.get("class", component_config["name"])
component_class = get_component_class(component_name)
return component_class.create(component_config, config)
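# Illustrative sketch (hypothetical `config` assumed to be a RasaNLUModelConfig
# instance): create_component_by_config({"name": "WhitespaceTokenizer"}, config)
# resolves the registered WhitespaceTokenizer class and calls its create()
# method with that component configuration.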
|
the-stack_106_17731
|
from random import choice
from typing import Type
from Coolapk.object import Account
import base64
import string
from Coolapk.Exception import LoginError, LoginErrorAttributes
def randomNumber():
"""
    :return: the random number string required for Coolapk login
"""
number = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
randomNumber = '0undefined'
make = [16, 17, 18]
for i in range(0, choice(make)):
randomNumber = randomNumber + str(choice(number))
return randomNumber
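# Illustrative output (sketch): randomNumber() returns the literal prefix
# '0undefined' followed by 16, 17 or 18 random digits,
# e.g. '0undefined4821093775120964'.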
def readKeyFile() -> Type[Account]:
def detable():
s1 = string.ascii_letters + string.digits
s2 = string.digits + string.ascii_letters
return str.maketrans(dict(zip(s2, s1)))
file = open('.\\key.coolapk', 'r')
content = file.read().translate(detable())
l = content.split("LoginForCoolapk")
r = []
if len(l) != 3:
raise LoginError(LoginErrorAttributes.KEYFILE_ERROR.value)
for i in l:
r.append(base64.b64decode(i.encode()).decode())
account = Account
account.uid = r[0]
account.username = r[1]
account.token = r[2]
return account
def writeKeyFile(account: Type[Account]):
def entable():
s1 = string.ascii_letters + string.digits
s2 = string.digits + string.ascii_letters
return str.maketrans(dict(zip(s1, s2)))
file = open('.\\key.coolapk', 'w')
writecontent = ("%sLoginForCoolapk%sLoginForCoolapk%s" %
(base64.b64encode(str(account.uid).encode()).decode(),
base64.b64encode(account.username.encode()).decode(),
base64.b64encode(
account.token.encode()).decode())).translate(entable())
file.write(writecontent)
|
the-stack_106_17732
|
#
# The Python Imaging Library
# $Id$
#
# base class for raster font file parsers
#
# history:
# 1997-06-05 fl created
# 1997-08-19 fl restrict image width
#
# Copyright (c) 1997-1998 by Secret Labs AB
# Copyright (c) 1997-1998 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import os
import Image
import marshal
try:
import zlib
except ImportError:
zlib = None
WIDTH = 800
def puti16(fp, values):
# write network order (big-endian) 16-bit sequence
for v in values:
if v < 0:
v = v + 65536
fp.write(chr(v>>8&255) + chr(v&255))
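# Example (sketch): puti16(fp, [1, 258]) writes the big-endian byte pairs
# "\x00\x01" and "\x01\x02"; a negative value such as -1 is first wrapped to
# 65535 and written as "\xff\xff".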
##
# Base class for raster font file handlers.
class FontFile:
bitmap = None
def __init__(self):
self.info = {}
self.glyph = [None] * 256
def __getitem__(self, ix):
return self.glyph[ix]
def compile(self):
"Create metrics and bitmap"
if self.bitmap:
return
# create bitmap large enough to hold all data
h = w = maxwidth = 0
lines = 1
for glyph in self:
if glyph:
d, dst, src, im = glyph
h = max(h, src[3] - src[1])
w = w + (src[2] - src[0])
if w > WIDTH:
lines = lines + 1
w = (src[2] - src[0])
maxwidth = max(maxwidth, w)
xsize = maxwidth
ysize = lines * h
if xsize == 0 and ysize == 0:
return ""
self.ysize = h
# paste glyphs into bitmap
self.bitmap = Image.new("1", (xsize, ysize))
self.metrics = [None] * 256
x = y = 0
for i in range(256):
glyph = self[i]
if glyph:
d, dst, src, im = glyph
xx, yy = src[2] - src[0], src[3] - src[1]
x0, y0 = x, y
x = x + xx
if x > WIDTH:
x, y = 0, y + h
x0, y0 = x, y
x = xx
s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
self.bitmap.paste(im.crop(src), s)
# print chr(i), dst, s
self.metrics[i] = d, dst, s
def save1(self, filename):
"Save font in version 1 format"
self.compile()
# font data
self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")
# font metrics
fp = open(os.path.splitext(filename)[0] + ".pil", "wb")
fp.write("PILfont\n")
fp.write(";;;;;;%d;\n" % self.ysize) # HACK!!!
fp.write("DATA\n")
for id in range(256):
m = self.metrics[id]
if not m:
puti16(fp, [0] * 10)
else:
puti16(fp, m[0] + m[1] + m[2])
fp.close()
def save2(self, filename):
"Save font in version 2 format"
# THIS IS WORK IN PROGRESS
self.compile()
data = marshal.dumps((self.metrics, self.info))
if zlib:
data = "z" + zlib.compress(data, 9)
else:
data = "u" + data
fp = open(os.path.splitext(filename)[0] + ".pil", "wb")
fp.write("PILfont2\n" + self.name + "\n" + "DATA\n")
fp.write(data)
self.bitmap.save(fp, "PNG")
fp.close()
save = save1 # for now
|
the-stack_106_17733
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 23 08:04:48 2020
@author: Mursito
"""
import random
from halma_model import HalmaModel
class HalmaPlayer:
nama = "Pemain"
deskripsi = "Random Strategy"
nomor = 1
index = 0
papan = []
teman = None
def __init__(self, nama):
self.nama = nama
def setNomor(self, nomor):
self.nomor = nomor
self.index = nomor-1
def setTeman(self, p1):
self.teman = p1
    # returns all possible moves (slide / jump) for the piece at (x1, y1)
def bisaMain(self, model, papan, x1, y1):
geser = []
loncat = []
ip = self.index;
dTujuan = model.dalamTujuan(ip, x1, y1)
for a in model.ARAH:
x2 = x1 + a[0]
y2 = y1 + a[1]
#print((x2, y2), end="")
if model.dalamPapan(x2, y2):
if (papan[x2][y2] == 0):
if not dTujuan or model.dalamTujuan(ip, x2, y2):
geser.append((x2,y2))
else:
x3 = x2 + a[0]
y3 = y2 + a[1]
#print((x3, y3), end="")
if model.dalamPapan(x3, y3):
if (papan[x3][y3] == 0):
if not dTujuan or model.dalamTujuan(ip, x3, y3):
loncat.append((x3,y3))
return geser, loncat
    # The player makes a move
    # return [(x2,y2)], (x1,y1), aksi
    # aksi = A_GESER (slide), A_LONCAT (jump), or A_BERHENTI (stop)
    # (x1, y1) = starting position of the piece
    # [(x2, y2)] = destination positions (array: one entry for a slide, possibly several for jumps)
def main(self, model):
papan = model.getPapan()
b0 = model.getPosisiBidak(self.index)
for b in b0:
g,l = self.bisaMain(model, papan, b[0], b[1])
# print("Geser : ", g)
# print("Loncat: ", l)
if l != [] :
return [l[0]], b, model.A_LONCAT
if g != [] :
return g, b, model.A_GESER
return None, None, model.A_BERHENTI
|
the-stack_106_17734
|
import os, sys, json
from flask import abort
from polarishub_flask.server.parser import printv
settings = {}
def load_settings():
with open(os.path.join(os.getcwd(), 'server', 'settings.json')) as f:
return json.load(f)
def get_settings():
global settings
if settings == {} or settings is None:
settings = load_settings()
return settings
def save_settings():
try:
with open(os.path.join(os.getcwd(), 'server', 'settings.json'), 'w') as f:
json.dump(settings, f)
return True
except:
return False
def get_dir(path):
if os.path.isdir(path):
path_list = os.listdir(path)
# printv (path_list, path)
printv(os.getcwd())
path_list = [(path_list[i], os.path.isfile(os.path.join(path, path_list[i])), os.path.join(path[len(os.getcwd()):], path_list[i])) for i in range(len(path_list))]
printv ("path_list", path_list)
return path_list
else:
abort(404)
keys = {
'username': lambda name:len(name)>0
}
def update_settings(new_settings):
global settings
printv(new_settings)
# for items in new_settings:
# printv (items)
for key, value in new_settings.items():
# printv(items)
# key, value = items
printv ((key, value))
if key in keys.keys() and keys[key](value):
printv ("key gets:", key)
printv (key, value)
if value:
settings[key] = value
else:
settings = load_settings()
return False
if save_settings():
return True
else:
return False
|
the-stack_106_17737
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x2 = self.avgpool(x)
x = x2.view(x2.size(0), -1)
x = self.fc(x)
return x2
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
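# Hypothetical usage sketch (not part of the original file): note that this
# variant of ResNet.forward() returns the pooled feature map `x2` instead of
# the final classification logits, so the output is a 4-D feature tensor:
#
#   import torch
#   model = resnet50(pretrained=False)
#   feats = model(torch.randn(1, 3, 224, 224))  # shape: (1, 2048, 1, 1)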
|
the-stack_106_17738
|
import collections.abc
import copy
import datetime
import decimal
import operator
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import partialmethod, total_ordering
from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
# When the _meta object was formalized, this exception was moved to
# django.core.exceptions. It is retained here for backwards compatibility
# purposes.
from django.core.exceptions import FieldDoesNotExist # NOQA
from django.db import connection, connections, router
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin
from django.utils import timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.duration import duration_microseconds, duration_string
from django.utils.functional import Promise, cached_property
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
__all__ = [
'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField',
'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField',
'DateField', 'DateTimeField', 'DecimalField', 'DurationField',
'EmailField', 'Empty', 'Field', 'FieldDoesNotExist', 'FilePathField',
'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField',
'NOT_PROVIDED', 'NullBooleanField', 'PositiveIntegerField',
'PositiveSmallIntegerField', 'SlugField', 'SmallIntegerField', 'TextField',
'TimeField', 'URLField', 'UUIDField',
]
class Empty:
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
def return_None():
return None
@total_ordering
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=(),
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.remote_field = rel
self.is_relation = self.remote_field is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
if isinstance(choices, collections.abc.Iterator):
choices = list(choices)
self.choices = choices or []
self.help_text = help_text
self.db_index = db_index
self.db_column = db_column
self._db_tablespace = db_tablespace
self.auto_created = auto_created
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = list(validators) # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
"""
Return "app_label.model_label.field_name" for fields attached to
models.
"""
if not hasattr(self, 'model'):
return super().__str__()
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""Display the module, class, and name of the field."""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
return [
*self._check_field_name(),
*self._check_choices(),
*self._check_db_index(),
*self._check_null_allowed_for_primary_keys(),
*self._check_backend_specific_checks(**kwargs),
*self._check_validators(),
*self._check_deprecation_details(),
]
def _check_field_name(self):
"""
Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk".
"""
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
obj=self,
id='fields.E001',
)
]
elif LOOKUP_SEP in self.name:
return [
checks.Error(
'Field names must not contain "%s".' % (LOOKUP_SEP,),
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=self,
id='fields.E003',
)
]
else:
return []
def _check_choices(self):
if not self.choices:
return []
def is_value(value, accept_promise=True):
return isinstance(value, (str, Promise) if accept_promise else str) or not is_iterable(value)
if is_value(self.choices, accept_promise=False):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
obj=self,
id='fields.E004',
)
]
# Expect [group_name, [value, display]]
for choices_group in self.choices:
try:
group_name, group_choices = choices_group
except (TypeError, ValueError):
# Containing non-pairs
break
try:
if not all(
is_value(value) and is_value(human_name)
for value, human_name in group_choices
):
break
except (TypeError, ValueError):
# No groups, choices in the form [value, display]
value, human_name = group_name, group_choices
if not is_value(value) or not is_value(human_name):
break
# Special case: choices=['ab']
if isinstance(choices_group, str):
break
else:
return []
return [
checks.Error(
"'choices' must be an iterable containing "
"(actual value, human readable name) tuples.",
obj=self,
id='fields.E005',
)
]
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
app_label = self.model._meta.app_label
for db in connections:
if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name):
return connections[db].validation.check_field(self, **kwargs)
return []
def _check_validators(self):
errors = []
for i, validator in enumerate(self.validators):
if not callable(validator):
errors.append(
checks.Error(
"All 'validators' must be callable.",
hint=(
"validators[{i}] ({repr}) isn't a function or "
"instance of a validator class.".format(
i=i, repr=repr(validator),
)
),
obj=self,
id='fields.E008',
)
)
return errors
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
'msg',
'%s has been removed except for support in historical '
'migrations.' % self.__class__.__name__
),
hint=self.system_check_removed_details.get('hint'),
obj=self,
id=self.system_check_removed_details.get('id', 'fields.EXXX'),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
'msg',
'%s has been deprecated.' % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get('hint'),
obj=self,
id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
)
]
return []
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
from django.db.models.expressions import Col
return Col(alias, self, output_field)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
        selected as AsText(table.col) on MySQL because the table.col data
        can't be used by Django.
"""
return sql, params
def deconstruct(self):
"""
Return enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class() has
been run.
         * The import path of the field, including the class, e.g.
           django.db.models.IntegerField. This should be the most portable
           version, so less specific may be better.
* A list of positional arguments.
* A dict of keyword arguments.
Note that the positional or keyword arguments must contain values of
the following types (including inner values of collection types):
* None, bool, str, int, float, complex, set, frozenset, list, tuple,
dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their
full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this
        time; just ensure that the resulting field is the same - prefer keyword
arguments over positional ones, and omit parameters with their default
values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": None,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
"db_tablespace": "_db_tablespace",
}
equals_comparison = {"choices", "validators"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.abc.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (self.name, path, [], keywords)
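    # Illustrative sketch (not part of the original source): for a hypothetical
    # field declared as CharField(max_length=100, null=True) and attached under
    # the name 'title', deconstruct() would return roughly:
    #
    #     >>> field = CharField(max_length=100, null=True)
    #     >>> field.set_attributes_from_name('title')
    #     >>> field.deconstruct()
    #     ('title', 'django.db.models.CharField', [], {'max_length': 100, 'null': True})
    #
    # Migrations then rebuild the field as self.__class__(*args, **kwargs),
    # which is exactly what clone() below does.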
def clone(self):
"""
        Use deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.remote_field:
obj.remote_field = copy.copy(self.remote_field)
if hasattr(self.remote_field, 'field') and self.remote_field.field is self:
obj.remote_field.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
state = self.__dict__.copy()
# The _get_default cached_property can't be pickled due to lambda
# usage.
state.pop('_get_default', None)
return _empty, (self.__class__,), state
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def get_pk_value_on_save(self, instance):
"""
Hook to generate new PK values on save. This method is called when
saving instances with no primary key value set. If this method returns
        something other than None, the returned value is used when saving
the new instance.
"""
if self.default:
return self.get_default()
return None
def to_python(self, value):
"""
Convert the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Return the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
"""
Some validators can't be created at field initialization time.
This method provides a way to delay their creation until required.
"""
return [*self.default_validators, *self._validators]
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validate value and raise ValidationError if necessary. Subclasses
should override this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self.choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python() and validate() are propagated. Return the correct
value if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type_parameters(self, connection):
return DictWrapper(self.__dict__, connection.ops.quote_name, 'qn_')
def db_check(self, connection):
"""
Return the database column check constraint for this field, for the
provided connection. Works the same way as db_type() for the case that
get_internal_type() does not map to a preexisting model field.
"""
data = self.db_type_parameters(connection)
try:
return connection.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
return None
def db_type(self, connection):
"""
Return the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = self.db_type_parameters(connection)
try:
return connection.data_types[self.get_internal_type()] % data
except KeyError:
return None
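    # A minimal sketch (hypothetical field classes, not part of the original
    # source) of the two approaches described above, as they would appear in
    # application code:
    #
    #     class HandField(Field):
    #         # Reuse a built-in column type via the internal-type lookup.
    #         def get_internal_type(self):
    #             return 'TextField'
    #
    #     class MySpecialField(Field):
    #         # Or name the column type directly when nothing built-in fits.
    #         def db_type(self, connection):
    #             return 'mytype(25)'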
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. For example, this method is called by ForeignKey and OneToOneField
to determine its data type.
"""
return self.db_type(connection)
def cast_db_type(self, connection):
"""Return the data type to use in the Cast() function."""
db_type = connection.ops.cast_data_types.get(self.get_internal_type())
if db_type:
return db_type % self.db_type_parameters(connection)
return self.db_type(connection)
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return values
(type, checks). This will look at db_type(), allowing custom model
fields to override it.
"""
type_string = self.db_type(connection)
check_string = self.db_check(connection)
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, 'from_db_value'):
return [self.from_db_value]
return []
@property
def unique(self):
return self._unique or self.primary_key
@property
def db_tablespace(self):
return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
def set_attributes_from_name(self, name):
self.name = self.name or name
self.attname, self.column = self.get_attname_column()
self.concrete = self.column is not None
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, private_only=False):
"""
Register the field with the model class it belongs to.
If private_only is True, create a separate instance of this field
for every subclass of cls, even if cls is not an abstract model.
"""
self.set_attributes_from_name(name)
self.model = cls
if private_only:
cls._meta.add_field(self, private=True)
else:
cls._meta.add_field(self)
if self.column:
# Don't override classmethods with the descriptor. This means that
# if you have a classmethod and a field with the same name, then
# such fields can't be deferred (we don't have a check for this).
if not getattr(cls, self.attname, None):
setattr(cls, self.attname, DeferredAttribute(self.attname))
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
partialmethod(cls._get_FIELD_display, field=self))
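    # Illustrative usage (hypothetical model, not part of the original source):
    # once contribute_to_class() has run for a field declared as
    # status = CharField(max_length=1, choices=[('d', 'Draft'), ('p', 'Published')]),
    # instances expose a generated get_status_display() method, e.g.
    #
    #     >>> article.status, article.get_status_display()
    #     ('d', 'Draft')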
def get_filter_kwargs_for_object(self, obj):
"""
        Return a dict that, when passed as kwargs to self.model.filter(), would
yield all instances having the same value for this field as obj has.
"""
return {self.name: getattr(obj, self.attname)}
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""Return field's value just before saving."""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""Perform preliminary non-db specific value checks and conversions."""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""
Return field's value prepared for interacting with the database backend.
Used by the default implementations of get_db_prep_save().
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""Return field's value prepared for saving into a database."""
return self.get_db_prep_value(value, connection=connection, prepared=False)
def has_default(self):
"""Return a boolean of whether this field has a default value."""
return self.default is not NOT_PROVIDED
def get_default(self):
"""Return the default value for this field."""
return self._get_default()
@cached_property
def _get_default(self):
if self.has_default():
if callable(self.default):
return self.default
return lambda: self.default
if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls:
return return_None
return str # return empty string
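    # Rough sketch of the resulting behaviour (assuming a backend that does not
    # interpret empty strings as NULL):
    #
    #     >>> IntegerField(default=0).get_default()
    #     0
    #     >>> IntegerField().get_default() is None   # empty_strings_allowed is False
    #     True
    #     >>> CharField().get_default()
    #     ''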
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
"""
        Return choices with a default blank choice included, for use
as <select> choices for this field.
"""
if self.choices:
choices = list(self.choices)
if include_blank:
blank_defined = any(choice in ('', None) for choice, _ in self.flatchoices)
if not blank_defined:
choices = blank_choice + choices
return choices
rel_model = self.remote_field.model
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
choice_func = operator.attrgetter(
self.remote_field.get_related_field().attname
if hasattr(self.remote_field, 'get_related_field')
else 'pk'
)
return (blank_choice if include_blank else []) + [
(choice_func(x), str(x))
for x in rel_model._default_manager.complex_filter(limit_choices_to)
]
def value_to_string(self, obj):
"""
Return a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return str(self.value_from_object(obj))
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
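    # For example (sketch), given grouped choices
    #     [('Audio', [('vinyl', 'Vinyl'), ('cd', 'CD')]), ('unknown', 'Unknown')]
    # flatchoices evaluates to
    #     [('vinyl', 'Vinyl'), ('cd', 'CD'), ('unknown', 'Unknown')]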
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""Return a django.forms.Field instance for this field."""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial', 'disabled'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""Return the value of this field in the given model instance."""
return getattr(obj, self.attname)
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super().__init__(*args, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_primary_key(),
]
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def rel_db_type(self, connection):
return IntegerField().db_type(connection=connection)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
from django.db.models.expressions import OuterRef
value = super().get_prep_value(value)
if value is None or isinstance(value, OuterRef):
return value
return int(value)
def contribute_to_class(self, cls, name, **kwargs):
assert not cls._meta.auto_field, "Model %s can't have more than one AutoField." % cls._meta.label
super().contribute_to_class(cls, name, **kwargs)
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BigAutoField(AutoField):
description = _("Big (8 byte) integer")
def get_internal_type(self):
return "BigAutoField"
def rel_db_type(self, connection):
return BigIntegerField().db_type(connection=connection)
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
'invalid_nullable': _("'%(value)s' value must be either True, False, or None."),
}
description = _("Boolean (Either True or False)")
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if self.null and value in self.empty_values:
return None
if value in (True, False):
            # 1/0 are equal to True/False; bool() converts the former to the latter.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid_nullable' if self.null else 'invalid'],
code='invalid',
params={'value': value},
)
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return self.to_python(value)
def formfield(self, **kwargs):
if self.choices:
include_blank = not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
form_class = forms.NullBooleanField if self.null else forms.BooleanField
# In HTML checkboxes, 'required' means "must be checked" which is
# different from the choices case ("must select some value").
# required=False allows unchecked checkboxes.
defaults = {'form_class': form_class, 'required': False}
return super().formfield(**{**defaults, **kwargs})
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_max_length_attribute(**kwargs),
]
def _check_max_length_attribute(self, **kwargs):
if self.max_length is None:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
obj=self,
id='fields.E120',
)
]
elif (not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or
self.max_length <= 0):
return [
checks.Error(
"'max_length' must be a positive integer.",
obj=self,
id='fields.E121',
)
]
else:
return []
def cast_db_type(self, connection):
if self.max_length is None:
return connection.ops.cast_char_field_without_max_length
return super().cast_db_type(connection)
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, str) or value is None:
return value
return str(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass it into the widget, for example).
defaults = {'max_length': self.max_length}
# TODO: Handle multiple backends with different feature flags.
if self.null and not connection.features.interprets_empty_strings_as_nulls:
defaults['empty_value'] = None
defaults.update(kwargs)
return super().formfield(**defaults)
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
system_check_removed_details = {
'msg': (
'CommaSeparatedIntegerField is removed except for support in '
'historical migrations.'
),
'hint': (
'Use CharField(validators=[validate_comma_separated_integer_list]) '
'instead.'
),
'id': 'fields.E901',
}
class DateTimeCheckMixin:
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_mutually_exclusive_options(),
*self._check_fix_default_value(),
]
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()]
enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
obj=self,
id='fields.E160',
)
]
else:
return []
def _check_fix_default_value(self):
return []
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super().__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
if not timezone.is_naive(value):
value = timezone.make_naive(value, timezone.utc)
value = value.date()
elif isinstance(value, datetime.date):
# Nothing to do, as dates don't have tz information
pass
else:
# No explicit date / datetime value -- no checks necessary
return []
offset = datetime.timedelta(days=1)
lower = (now - offset).date()
upper = (now + offset).date()
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.auto_now:
kwargs['auto_now'] = True
if self.auto_now_add:
kwargs['auto_now_add'] = True
if self.auto_now or self.auto_now_add:
del kwargs['editable']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
if not self.null:
setattr(
cls, 'get_next_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=True)
)
setattr(
cls, 'get_previous_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=False)
)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DateField,
**kwargs,
})
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.date):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
lower = datetime.datetime(lower.year, lower.month, lower.day)
upper = now + second_offset
upper = datetime.datetime(upper.year, upper.month, upper.day)
value = datetime.datetime(value.year, value.month, value.day)
else:
# No explicit date / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
def get_prep_value(self, value):
value = super().get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
try:
name = '%s.%s' % (self.model.__name__, self.name)
except AttributeError:
name = '(unbound)'
warnings.warn("DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." %
(name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
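    # For example (sketch): with USE_TZ = True and TIME_ZONE = 'UTC', a naive
    # datetime.datetime(2018, 1, 1, 12, 0) passed through get_prep_value()
    # triggers the RuntimeWarning above and comes out as the aware value
    # 2018-01-01 12:00:00+00:00 before being handed to the backend.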
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datetimefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DateTimeField,
**kwargs,
})
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
digits_errors = [
*self._check_decimal_places(),
*self._check_max_digits(),
]
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
obj=self,
id='fields.E130',
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
obj=self,
id='fields.E131',
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
obj=self,
id='fields.E132',
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
obj=self,
id='fields.E133',
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
obj=self,
id='fields.E134',
)
]
return []
@cached_property
def validators(self):
return super().validators + [
validators.DecimalValidator(self.max_digits, self.decimal_places)
]
@cached_property
def context(self):
return decimal.Context(prec=self.max_digits)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.max_digits is not None:
kwargs['max_digits'] = self.max_digits
if self.decimal_places is not None:
kwargs['decimal_places'] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, float):
return self.context.create_decimal_from_float(value)
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
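    # For example (sketch): with max_digits=5, to_python(0.1) goes through the
    # context above and yields Decimal('0.10000') rather than the full binary
    # expansion of the float, while to_python('0.1') is parsed exactly as
    # Decimal('0.1').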
def get_db_prep_save(self, value, connection):
return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
return super().formfield(**{
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
**kwargs,
})
class DurationField(Field):
"""
Store timedelta objects.
Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint
of microseconds on other databases.
"""
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"[DD] [HH:[MM:]]ss[.uuuuuu] format.")
}
description = _("Duration")
def get_internal_type(self):
return "DurationField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(value)
except ValueError:
pass
else:
if parsed is not None:
return parsed
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_value(self, value, connection, prepared=False):
if connection.features.has_native_duration_field:
return value
if value is None:
return None
return duration_microseconds(value)
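    # For example (sketch): on a backend without a native interval type,
    # datetime.timedelta(days=1, seconds=1) is stored as the integer
    # 86401000000 (microseconds).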
def get_db_converters(self, connection):
converters = []
if not connection.features.has_native_duration_field:
converters.append(connection.ops.convert_durationfield_value)
return converters + super().get_db_converters(connection)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else duration_string(val)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DurationField,
**kwargs,
})
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length=254 to be compliant with RFCs 3696 and 5321
kwargs.setdefault('max_length', 254)
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
        # We do not exclude max_length if it matches the default, as we want to
        # change the default in the future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
return super().formfield(**{
'form_class': forms.EmailField,
**kwargs,
})
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs.setdefault('max_length', 100)
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_allowing_files_or_folders(**kwargs),
]
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
obj=self,
id='fields.E140',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.path != '':
kwargs['path'] = self.path
if self.match is not None:
kwargs['match'] = self.match
if self.recursive is not False:
kwargs['recursive'] = self.recursive
if self.allow_files is not True:
kwargs['allow_files'] = self.allow_files
if self.allow_folders is not False:
kwargs['allow_folders'] = self.allow_folders
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return str(value)
def formfield(self, **kwargs):
return super().formfield(**{
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
**kwargs,
})
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.FloatField,
**kwargs,
})
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
description = _("Integer")
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_max_length_warning(),
]
def _check_max_length_warning(self):
if self.max_length is not None:
return [
checks.Warning(
"'max_length' is ignored when used with IntegerField",
hint="Remove 'max_length' from field",
obj=self,
id='fields.W122',
)
]
return []
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
validators_ = super().validators
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if (min_value is not None and not
any(isinstance(validator, validators.MinValueValidator) and
validator.limit_value >= min_value for validator in validators_)):
validators_.append(validators.MinValueValidator(min_value))
if (max_value is not None and not
any(isinstance(validator, validators.MaxValueValidator) and
validator.limit_value <= max_value for validator in validators_)):
validators_.append(validators.MaxValueValidator(max_value))
return validators_
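    # For example (sketch): on a backend whose integer_field_range('IntegerField')
    # reports (-2147483648, 2147483647), this adds MinValueValidator(-2147483648)
    # and MaxValueValidator(2147483647) unless user-supplied validators already
    # enforce an equal or stricter range.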
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return int(value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.IntegerField,
**kwargs,
})
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT,
**kwargs,
})
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
system_check_removed_details = {
'msg': (
'IPAddressField has been removed except for support in '
'historical migrations.'
),
'hint': 'Use GenericIPAddressField instead.',
'id': 'fields.E900',
}
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return str(value)
def get_internal_type(self):
return "IPAddressField"
class GenericIPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
super().__init__(verbose_name, name, *args, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_blank_and_null_values(**kwargs),
]
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
'GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.',
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.unpack_ipv4 is not False:
kwargs['unpack_ipv4'] = self.unpack_ipv4
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length") == 39:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value is None:
return None
if not isinstance(value, str):
value = str(value)
value = value.strip()
if ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_ipaddressfield_value(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return str(value)
def formfield(self, **kwargs):
return super().formfield(**{
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
**kwargs,
})
class NullBooleanField(BooleanField):
default_error_messages = {
'invalid': _("'%(value)s' value must be either None, True or False."),
'invalid_nullable': _("'%(value)s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "NullBooleanField"
class PositiveIntegerRelDbTypeMixin:
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. In most cases, a foreign key pointing to a positive integer
primary key will have an integer column data type but some databases
(e.g. MySQL) have an unsigned integer type. In that case
(related_fields_match_type=True), the primary key should return its
db_type.
"""
if connection.features.related_fields_match_type:
return self.db_type(connection)
else:
return IntegerField().db_type(connection=connection)
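    # Concretely (sketch): on MySQL, where related_fields_match_type is True and
    # positive integer columns are created as unsigned integers, a ForeignKey
    # pointing at a PositiveIntegerField primary key is also created unsigned;
    # other backends fall back to the plain IntegerField column type.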
class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': 0,
**kwargs,
})
class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': 0,
**kwargs,
})
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs):
self.allow_unicode = allow_unicode
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 50:
del kwargs['max_length']
if self.db_index is False:
kwargs['db_index'] = False
else:
del kwargs['db_index']
if self.allow_unicode is not False:
kwargs['allow_unicode'] = self.allow_unicode
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.SlugField,
'allow_unicode': self.allow_unicode,
**kwargs,
})
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if isinstance(value, str) or value is None:
return value
return str(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass it into the widget, for example).
return super().formfield(**{
'max_length': self.max_length,
**({} if self.choices else {'widget': forms.Textarea}),
**kwargs,
})
class TimeField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%(value)s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super().__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.time):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
value = datetime.datetime.combine(now.date(), value)
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc).time()
else:
# No explicit time / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs['blank']
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_timefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.TimeField,
**kwargs,
})
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs.setdefault('max_length', 200)
super().__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 200:
del kwargs['max_length']
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
return super().formfield(**{
'form_class': forms.URLField,
**kwargs,
})
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs.setdefault('editable', False)
super().__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.editable:
kwargs['editable'] = True
else:
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_placeholder(self, value, compiler, connection):
return connection.ops.binary_placeholder_sql(value)
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super().get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super().get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(self.value_from_object(obj)).decode('ascii')
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, str):
return memoryview(b64decode(value.encode('ascii')))
return value
class UUIDField(Field):
default_error_messages = {
'invalid': _("'%(value)s' is not a valid UUID."),
}
description = 'Universally unique identifier'
empty_strings_allowed = False
def __init__(self, verbose_name=None, **kwargs):
kwargs['max_length'] = 32
super().__init__(verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "UUIDField"
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
if not isinstance(value, uuid.UUID):
value = self.to_python(value)
if connection.features.has_native_uuid_field:
return value
return value.hex
def to_python(self, value):
if value is not None and not isinstance(value, uuid.UUID):
input_form = 'int' if isinstance(value, int) else 'hex'
try:
return uuid.UUID(**{input_form: value})
except (AttributeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
return value
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.UUIDField,
**kwargs,
})
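# --- Hypothetical usage sketch (not part of Django) -------------------------
# A minimal illustration of the base64 round trip that
# BinaryField.value_to_string() and BinaryField.to_python() above perform when
# binary data is serialized to and from text. The import is repeated here so
# the sketch stands on its own.
if __name__ == "__main__":
    from base64 import b64decode, b64encode
    raw = b"\x00\xffbinary payload"
    as_text = b64encode(raw).decode('ascii')                    # what value_to_string() emits
    restored = memoryview(b64decode(as_text.encode('ascii')))   # what to_python() returns
    assert bytes(restored) == raw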
|
the-stack_106_17739
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest # noqa
from parglare import Grammar, Parser, ParseError, ParserInitError, \
GrammarError, DisambiguationError
from parglare.actions import pass_single, pass_nochange, collect
def test_parse_list_of_integers():
grammar = """
Numbers: all_less_than_five EOF;
all_less_than_five: all_less_than_five int_less_than_five
| int_less_than_five;
terminals
int_less_than_five:;
"""
def int_less_than_five(input, pos):
if input[pos] < 5:
return [input[pos]]
recognizers = {
'int_less_than_five': int_less_than_five
}
g = Grammar.from_string(grammar, recognizers=recognizers, debug=True)
actions = {
'Numbers': pass_single,
'all_less_than_five': collect,
'int_less_than_five': pass_single
}
# Test that `ws` must be set to `None` for non-textual content
parser = Parser(g, actions=actions)
ints = [3, 4, 1, 4]
with pytest.raises(
ParserInitError,
match=r'For parsing non-textual content please '
'set `ws` to `None`'):
parser.parse(ints)
parser = Parser(g, actions=actions, ws=None)
ints = [3, 4, 1, 4]
p = parser.parse(ints)
assert p == ints
# Test that error is correctly reported.
with pytest.raises(ParseError) as e:
parser.parse([4, 2, 1, 6, 3])
assert '1:3:"[4, 2, 1]*[6, 3]"' in str(e)
assert 'int_less_than_five' in str(e)
def test_parse_list_of_integers_lexical_disambiguation():
def int_less_than_five(input, pos):
if input[pos] < 5:
return [input[pos]]
def ascending(input, pos):
"Match sublist of ascending elements. Matches at least one."
last = pos + 1
while last < len(input) and input[last] > input[last-1]:
last += 1
if last > pos:
return input[pos:last]
def ascending_nosingle(input, pos):
"Match sublist of ascending elements. Matches at least two."
last = pos + 1
while last < len(input) and input[last] > input[last-1]:
last += 1
if last - pos >= 2:
return input[pos:last]
grammar = """
Numbers: all_less_than_five ascending all_less_than_five EOF;
all_less_than_five: all_less_than_five int_less_than_five
| int_less_than_five;
terminals
int_less_than_five:;
ascending:;
"""
recognizers = {
'int_less_than_five': int_less_than_five,
'ascending': ascending
}
g = Grammar.from_string(grammar, recognizers=recognizers)
actions = {
'Numbers': lambda _, nodes: [nodes[0], nodes[1], nodes[2]],
'all_less_than_five': collect,
'int_less_than_five': pass_single, # Unpack element for collect
'ascending': pass_nochange
}
parser = Parser(g, actions=actions, ws=None, debug=True)
ints = [3, 4, 1, 4, 7, 8, 9, 3]
# This must fail as ascending and int_less_than_five recognizers both
# might match just a single int, and after the parser has seen 3 it will try
# to disambiguate and fail as the following 4 is recognized by both
# recognizers.
with pytest.raises(DisambiguationError):
p = parser.parse(ints)
# Now we change the recognizer for ascending to match at least two
# consecutive ascending numbers.
recognizers['ascending'] = ascending_nosingle
g = Grammar.from_string(grammar, recognizers=recognizers)
parser = Parser(g, actions=actions, ws=None, debug=True)
# Parsing now must pass
p = parser.parse(ints)
assert p == [[3, 4], [1, 4, 7, 8, 9], [3]]
def test_terminals_with_empty_bodies_require_recognizers():
"""
If there are terminals with empty bodies in the grammar then recognizers
must be given and there must be a recognizer for each terminal missing
an in-grammar recognizer.
"""
grammar = """
S: A | B | C;
terminals
A: {15};
B: ;
C: "c";
"""
with pytest.raises(GrammarError):
g = Grammar.from_string(grammar)
recognizers = {
'B': lambda input, pos: None,
}
with pytest.raises(GrammarError):
g = Grammar.from_string(grammar, recognizers=recognizers)
recognizers['A'] = lambda input, pos: None
g = Grammar.from_string(grammar, recognizers=recognizers)
assert g
# Test that setting _no_check_recognizers prevents the grammar
# error. This is used by the pglr command.
Grammar.from_string(grammar, _no_check_recognizers=True)
|
the-stack_106_17740
|
from django import http
from django.conf import settings
from django.contrib import messages
from django.shortcuts import redirect
from django.utils.translation import gettext as _
from django.views import generic
from paypal.payflow import facade, models
class TransactionListView(generic.ListView):
model = models.PayflowTransaction
template_name = 'paypal/payflow/dashboard/transaction_list.html'
context_object_name = 'transactions'
class TransactionDetailView(generic.DetailView):
model = models.PayflowTransaction
template_name = 'paypal/payflow/dashboard/transaction_detail.html'
context_object_name = 'txn'
def get_context_data(self, **kwargs):
ctx = super(TransactionDetailView, self).get_context_data(**kwargs)
ctx['show_form_buttons'] = getattr(
settings, 'PAYPAL_PAYFLOW_DASHBOARD_FORMS', False)
return ctx
def post(self, request, *args, **kwargs):
orig_txn = self.get_object()
if not getattr(settings, 'PAYPAL_PAYFLOW_DASHBOARD_FORMS', False):
messages.error(self.request, _("Dashboard actions not permitted"))
return redirect('payflow_dashboard:paypal-payflow-detail', pk=orig_txn.id)
dispatch_map = {
'credit': self.credit,
'void': self.void,
'capture': self.capture,
}
action = request.POST.get('action', None)
if action in dispatch_map:
return dispatch_map[action](orig_txn)
return http.HttpResponseBadRequest("Unrecognised action")
def capture(self, orig_txn):
try:
txn = facade.delayed_capture(orig_txn.comment1)
except Exception as e:
messages.error(
self.request, _("Unable to settle transaction - %s") % e)
return redirect('payflow_dashboard:paypal-payflow-detail', pk=orig_txn.id)
else:
messages.success(
self.request, _("Transaction %s settled") % orig_txn.pnref)
return redirect('payflow_dashboard:paypal-payflow-detail', pk=txn.id)
def credit(self, orig_txn):
try:
txn = facade.credit(orig_txn.comment1)
except Exception as e:
messages.error(self.request, _("Unable to credit transaction - %s") % e)
return redirect('payflow_dashboard:paypal-payflow-detail', pk=orig_txn.id)
else:
messages.success(self.request, _("Transaction %s credited") % orig_txn.pnref)
return redirect('payflow_dashboard:paypal-payflow-detail', pk=txn.id)
def void(self, orig_txn):
try:
txn = facade.void(orig_txn.comment1, orig_txn.pnref)
except Exception as e:
messages.error(self.request, _("Unable to void transaction - %s") % e)
return redirect('payflow_dashboard:paypal-payflow-detail', pk=orig_txn.id)
else:
messages.success(self.request, _("Transaction %s voided") % orig_txn.pnref)
return redirect('payflow_dashboard:paypal-payflow-detail', pk=txn.id)
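# Hypothetical configuration note (example value, not a shipped default): the
# capture/credit/void POST actions above are only honoured when the Django
# settings enable the dashboard forms, e.g.
#
#     PAYPAL_PAYFLOW_DASHBOARD_FORMS = True
#
# otherwise post() redirects back to the detail page with an error message.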
|
the-stack_106_17741
|
"""show_mcast.py
IOSXE parsers for the following show commands:
* show ip mroute
* show ipv6 mroute
* show ip mroute
* show ip mroute vrf <vrf_name>
* show ipv6 mroute
* show ipv6 mroute vrf <vrf_name>
* show ip mroute static
* show ip mroute vrf <vrf_name> static
* show ip multicast
* show ip multicast vrf <vrf_name>
* show ip multicast mpls vif
"""
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional
# =====================================
# Parser for 'show ip mroute'
# Parser for 'show ip mroute vrf xxx'
# Parser for 'show ipv6 mroute'
# Parser for 'show ipv6 mroute vrf xxx'
# =====================================
class ShowIpMrouteSchema(MetaParser):
"""Schema for:
show ip mroute
show ip mroute vrf <vrf>
show ipv6 mroute
show ipv6 mroute vrf <vrf>"""
schema = {'vrf':
{Any():
{'address_family':
{Any():
{Optional('multicast_group'):
{Any():
{Optional('source_address'):
{Any():
{Optional('uptime'): str,
Optional('expire'): str,
Optional('flags'): str,
Optional('rp_bit'): bool,
Optional('msdp_learned'): bool,
Optional('rp'): str,
Optional('rpf_nbr'): str,
Optional('rpf_info'): str,
Optional('incoming_interface_list'):
{Any():
{'rpf_nbr': str,
Optional('rpf_info'): str,
},
},
Optional('outgoing_interface_list'):
{Any():
{'uptime': str,
'expire': str,
'state_mode': str,
Optional('flags'): str,
Optional('vcd'): str,
Optional('lisp_mcast_source'): str,
Optional('lisp_mcast_group'): str,
},
},
},
},
},
},
},
},
}
},
}
class ShowIpMroute(ShowIpMrouteSchema):
"""Parser for:
show ip mroute
show ip mroute vrf <vrf>"""
cli_command = ['show ip mroute', 'show ip mroute vrf {vrf}']
exclude = ['expire', 'uptime', 'outgoing_interface_list', 'flags']
def cli(self, vrf='',output=None):
if output is None:
if vrf:
cmd = self.cli_command[1].format(vrf=vrf)
else:
vrf = 'default'
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
# initial variables
mroute_dict = {}
sub_dict = {}
outgoing = False
for line in out.splitlines():
line = line.strip()
# IP Multicast Routing Table
# Multicast Routing Table
p1 = re.compile(r'^(?P<address_family>[\w\W]+)? *[mM]ulticast'
' +[rR]outing +[tT]able$')
m = p1.match(line)
if m:
address_family = m.groupdict()['address_family']
if address_family:
if address_family.strip().lower() == 'ip':
address_family = 'ipv4'
else:
address_family = 'ipv6'
if 'vrf' not in mroute_dict:
mroute_dict['vrf'] = {}
if vrf not in mroute_dict['vrf']:
mroute_dict['vrf'][vrf] = {}
if 'address_family' not in mroute_dict['vrf'][vrf]:
mroute_dict['vrf'][vrf]['address_family'] = {}
if address_family not in mroute_dict['vrf'][vrf]['address_family']:
mroute_dict['vrf'][vrf]['address_family'][address_family] = {}
continue
# (*, 239.1.1.1), 00:00:03/stopped, RP 10.4.1.1, flags: SPF
# (10.4.1.1, 239.1.1.1), 00:00:03/00:02:57, flags: PFT
# (*, FF07::1), 00:04:45/00:02:47, RP 2001:DB8:6::6, flags:S
# (2001:DB8:999::99, FF07::1), 00:02:06/00:01:23, flags:SFT
p2 = re.compile(r'^\((?P<source_address>[\w\:\.\*\/]+),'
' +(?P<multicast_group>[\w\:\.\/]+)\),'
' +(?P<uptime>[\w\:\.]+)\/'
'(?P<expires>[\w\:\.]+),'
'( +RP +(?P<rendezvous_point>[\w\:\.]+),)?'
' +flags: *(?P<flags>[a-zA-Z]+)$')
m = p2.match(line)
if m:
source_address = m.groupdict()['source_address']
multicast_group = m.groupdict()['multicast_group']
if 'multicast_group' not in mroute_dict['vrf'][vrf]['address_family'][address_family]:
mroute_dict['vrf'][vrf]['address_family'][address_family]['multicast_group'] = {}
if multicast_group not in mroute_dict['vrf'][vrf]['address_family'][address_family]\
['multicast_group']:
mroute_dict['vrf'][vrf]['address_family'][address_family]['multicast_group'][multicast_group] = {}
if 'source_address' not in mroute_dict['vrf'][vrf]['address_family'][address_family]\
['multicast_group'][multicast_group]:
mroute_dict['vrf'][vrf]['address_family'][address_family]['multicast_group'][multicast_group]\
['source_address'] = {}
if source_address not in mroute_dict['vrf'][vrf]['address_family'][address_family]\
['multicast_group'][multicast_group]['source_address']:
mroute_dict['vrf'][vrf]['address_family'][address_family]['multicast_group'][multicast_group]\
['source_address'][source_address] = {}
sub_dict = mroute_dict['vrf'][vrf]['address_family'][address_family]['multicast_group'][multicast_group]\
['source_address'][source_address]
sub_dict['uptime'] = m.groupdict()['uptime']
sub_dict['expire'] = m.groupdict()['expires']
flags = m.groupdict()['flags']
sub_dict['flags'] = flags
if "M" in flags:
sub_dict['msdp_learned'] = True
else:
sub_dict['msdp_learned'] = False
if "R" in flags:
sub_dict['rp_bit'] = True
else:
sub_dict['rp_bit'] = False
rendezvous_point = m.groupdict()['rendezvous_point']
if rendezvous_point:
sub_dict['rp'] = rendezvous_point
continue
# Incoming interface: Null, RPF nbr 224.0.0.0224.0.0.0
# Incoming interface: Loopback0, RPF nbr 0.0.0.0, Registering
p3 = re.compile(r'^Incoming +interface:'
' +(?P<incoming_interface>[a-zA-Z0-9\/\-\.]+),'
' +RPF +nbr +(?P<rpf_nbr>[\w\:\.]+)'
'(, *(?P<status>\w+))?$')
m = p3.match(line)
if m:
incoming_interface = m.groupdict()['incoming_interface']
rpf_nbr = m.groupdict()['rpf_nbr']
rpf_info = m.groupdict()['status']
sub_dict['rpf_nbr'] = rpf_nbr
if rpf_info:
sub_dict['rpf_info'] = rpf_info.lower()
if incoming_interface.lower() == 'null':
sub_dict['rpf_nbr'] = rpf_nbr
if rpf_info:
sub_dict['rpf_info'] = rpf_info.lower()
continue
if 'incoming_interface_list' not in sub_dict:
sub_dict['incoming_interface_list'] = {}
if incoming_interface not in sub_dict['incoming_interface_list']:
sub_dict['incoming_interface_list'][incoming_interface] = {}
sub_dict['incoming_interface_list'][incoming_interface]['rpf_nbr'] = rpf_nbr
if rpf_info:
sub_dict['incoming_interface_list'][incoming_interface]\
['rpf_info'] = rpf_info.lower()
continue
# Incoming interface:Tunnel5
p3_1 = re.compile(r'^Incoming +interface:'
' *(?P<incoming_interface>[a-zA-Z0-9\/\-\.]+)$')
m = p3_1.match(line)
if m:
incoming_interface = m.groupdict()['incoming_interface']
if incoming_interface.lower() == 'null':
continue
if 'incoming_interface_list' not in sub_dict:
sub_dict['incoming_interface_list'] = {}
if incoming_interface not in sub_dict['incoming_interface_list']:
sub_dict['incoming_interface_list'][incoming_interface] = {}
continue
# RPF nbr:2001:db8:90:24::6
p3_2 = re.compile(r'^RPF +nbr: *(?P<rpf_nbr>[\w\:\.]+)$')
m = p3_2.match(line)
if m:
rpf_nbr = m.groupdict()['rpf_nbr']
try:
sub_dict['rpf_nbr'] = rpf_nbr
sub_dict['incoming_interface_list'][incoming_interface]['rpf_nbr'] = rpf_nbr
except Exception:
sub_dict['rpf_nbr'] = rpf_nbr
continue
# Outgoing interface list: Null
# Outgoing interface list:
p4 = re.compile(r'^Outgoing +interface +list:'
'( *(?P<intf>\w+))?$')
m = p4.match(line)
if m:
intf = m.groupdict()['intf']
if intf:
outgoing = False
else:
outgoing = True
continue
# Vlan5, Forward/Dense, 00:03:25/00:00:00, H
# Vlan5, Forward/Dense, 00:04:35/00:02:30
# ATM0/0, VCD 14, Forward/Sparse, 00:03:57/00:02:53
# POS4/0, Forward, 00:02:06/00:03:27
# LISP0.4100, (172.24.0.3, 232.0.0.199), Forward/Sparse, 00:10:33/stopped
p5 = re.compile(r'^(?P<outgoing_interface>[a-zA-Z0-9\/\.\-]+),'
'( +VCD +(?P<vcd>\d+),)?'
'( \(((?P<lisp_mcast_source>[0-9\.,]+), (?P<lisp_mcast_group>[0-9\.,]+)+)\),)?'
' +(?P<state_mode>[\w\/-]+),'
' +(?P<uptime>[a-zA-Z0-9\:]+)\/'
'(?P<expire>[\w\:]+)'
'(, *(?P<flags>\w+))?$')
m = p5.match(line)
if m and outgoing:
outgoing_interface = m.groupdict()['outgoing_interface']
vcd = m.groupdict()['vcd']
uptime = m.groupdict()['uptime']
lisp_mcast_source = m.groupdict()['lisp_mcast_source']
lisp_mcast_group = m.groupdict()['lisp_mcast_group']
state_mode = m.groupdict()['state_mode'].lower()
expire = m.groupdict()['expire']
flags = m.groupdict()['flags']
if 'outgoing_interface_list' not in sub_dict:
sub_dict['outgoing_interface_list'] = {}
if outgoing_interface not in sub_dict['outgoing_interface_list']:
sub_dict['outgoing_interface_list'][outgoing_interface] = {}
sub_dict['outgoing_interface_list'][outgoing_interface]['uptime'] = uptime
sub_dict['outgoing_interface_list'][outgoing_interface]['expire'] = expire
sub_dict['outgoing_interface_list'][outgoing_interface]['state_mode'] = state_mode
if flags:
sub_dict['outgoing_interface_list'][outgoing_interface]['flags'] = flags
if vcd:
sub_dict['outgoing_interface_list'][outgoing_interface]['vcd'] = vcd
if lisp_mcast_source:
sub_dict['outgoing_interface_list'][outgoing_interface]['lisp_mcast_source'] = lisp_mcast_source
if lisp_mcast_group:
sub_dict['outgoing_interface_list'][outgoing_interface]['lisp_mcast_group'] = lisp_mcast_group
continue
return mroute_dict
# ===========================================
# Parser for 'show ipv6 mroute'
# Parser for 'show ipv6 mroute vrf xxx'
# ===========================================
class ShowIpv6Mroute(ShowIpMroute):
"""Parser for:
show ipv6 mroute
show ipv6 mroute vrf <vrf>"""
cli_command = ['show ipv6 mroute', 'show ipv6 mroute vrf {vrf}']
exclude = ['expire', 'uptime', 'joins', 'leaves',
'incoming_interface_list', '(Tunnel.*)']
def cli(self, vrf='',output=None):
if output is None:
if vrf:
cmd = self.cli_command[1].format(vrf=vrf)
else:
vrf = 'default'
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
return super().cli(vrf=vrf, output=out)
# ===========================================
# Parser for 'show ip mroute static'
# Parser for 'show ip mroute vrf xxx static'
# ===========================================
class ShowIpMrouteStaticSchema(MetaParser):
"""Schema for:
show ip mroute static
show ip mroute vrf <vrf> static
"""
schema = {'vrf':
{Any():
{'mroute':
{Any():
{'path':
{Any():
{'neighbor_address': str,
Optional('admin_distance'): str
}
},
},
},
},
},
}
class ShowIpMrouteStatic(ShowIpMrouteStaticSchema):
"""Parser for:
show ip mroute static
show ip mroute vrf <vrf> static
"""
cli_command = ['show ip mroute static', 'show ip mroute vrf {vrf} static']
def cli(self, vrf='',output=None):
if output is None:
if vrf:
cmd = self.cli_command[1].format(vrf=vrf)
else:
vrf = 'default'
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
ret_dict = {}
for line in out.splitlines():
line = line.strip()
# Mroute: 172.16.0.0/16, RPF neighbor: 172.30.10.13, distance: 1
p1 = re.compile(r'^Mroute: +(?P<mroute>[\w\:\.\/]+),'
' RPF +neighbor: +(?P<rpf_nbr>[\w\.\:]+),'
' distance: +(?P<distance>\d+)$')
m = p1.match(line)
if m:
mroute = m.groupdict()['mroute']
rpf_nbr = m.groupdict()['rpf_nbr']
distance = m.groupdict()['distance']
path = rpf_nbr + ' ' + distance
if 'vrf' not in ret_dict:
ret_dict['vrf'] = {}
if vrf not in ret_dict['vrf']:
ret_dict['vrf'][vrf] = {}
if 'mroute' not in ret_dict['vrf'][vrf]:
ret_dict['vrf'][vrf]['mroute'] = {}
if mroute not in ret_dict['vrf'][vrf]['mroute']:
ret_dict['vrf'][vrf]['mroute'][mroute] = {}
if 'path' not in ret_dict['vrf'][vrf]['mroute'][mroute]:
ret_dict['vrf'][vrf]['mroute'][mroute]['path'] = {}
if path not in ret_dict['vrf'][vrf]['mroute'][mroute]['path']:
ret_dict['vrf'][vrf]['mroute'][mroute]['path'][path] = {}
ret_dict['vrf'][vrf]['mroute'][mroute]['path'][path]\
['neighbor_address'] = rpf_nbr
ret_dict['vrf'][vrf]['mroute'][mroute]['path'][path]\
['admin_distance'] = distance
continue
return ret_dict
# ===========================================
# Parser for 'show ip multicast'
# Parser for 'show ip multicast vrf xxx'
# ===========================================
class ShowIpMulticastSchema(MetaParser):
"""Schema for:
show ip multicast
show ip multicast vrf <vrf>
"""
schema = {
'vrf': {
Any(): {
'enable': bool,
'multipath': bool,
'route_limit': str,
'fallback_group_mode': str,
'multicast_bound_with_filter_autorp': int,
Optional('mo_frr'): bool,
},
},
}
class ShowIpMulticast(ShowIpMulticastSchema):
"""Parser for:
show ip multicast
show ip multicast vrf <vrf>
"""
cli_command = ['show ip multicast', 'show ip multicast vrf {vrf}']
def cli(self, vrf='', output=None):
if output is None:
if vrf:
cmd = self.cli_command[1].format(vrf=vrf)
else:
vrf = 'default'
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
ret_dict = {}
for line in out.splitlines():
line = line.strip()
# Multicast Routing: enabled
p1 = re.compile(r'^Multicast +Routing: +(?P<status>\w+)$')
m = p1.match(line)
if m:
status = m.groupdict()['status'].lower()
if 'vrf' not in ret_dict:
ret_dict['vrf'] = {}
if vrf not in ret_dict['vrf']:
ret_dict['vrf'][vrf] = {}
if 'enabled' in status:
ret_dict['vrf'][vrf]['enable'] = True
else:
ret_dict['vrf'][vrf]['enable'] = False
continue
# Multicast Multipath: enabled
p2 = re.compile(r'^Multicast +Multipath: +(?P<status>\w+)$')
m = p2.match(line)
if m:
status = m.groupdict()['status'].lower()
if 'enabled' in status:
ret_dict['vrf'][vrf]['multipath'] = True
else:
ret_dict['vrf'][vrf]['multipath'] = False
continue
# Multicast Route limit: No limit
p3 = re.compile(r'^Multicast +Route +limit: +(?P<status>[\w\s]+)$')
m = p3.match(line)
if m:
status = m.groupdict()['status'].lower()
ret_dict['vrf'][vrf]['route_limit'] = status
continue
# Multicast Fallback group mode: Sparse
p4 = re.compile(r'^Multicast +Fallback +group +mode: +(?P<mode>[\w\s]+)$')
m = p4.match(line)
if m:
mode = m.groupdict()['mode'].lower()
ret_dict['vrf'][vrf]['fallback_group_mode'] = mode
continue
# Number of multicast boundaries configured with filter-autorp option: 0
p5 = re.compile(r'^Number +of +multicast +boundaries +configured +'
'with +filter\-autorp +option: +(?P<num>\d+)$')
m = p5.match(line)
if m:
num = m.groupdict()['num']
ret_dict['vrf'][vrf]['multicast_bound_with_filter_autorp'] = int(num)
continue
# MoFRR: Disabled
p2 = re.compile(r'^MoFRR: +(?P<status>\w+)$')
m = p2.match(line)
if m:
status = m.groupdict()['status'].lower()
if 'enabled' in status:
ret_dict['vrf'][vrf]['mo_frr'] = True
else:
ret_dict['vrf'][vrf]['mo_frr'] = False
continue
return ret_dict
class ShowIpMulticastMplsvifSchema(MetaParser):
"""Schema for:
show ip multicast mpls vif
"""
schema = {
'interfaces': {
Any(): {
'next_hop': str,
'application': str,
'ref_count': str,
'table': int,
'vrf': str,
'flags': str,
},
},
}
class ShowIpMulticastMplsvif(ShowIpMulticastMplsvifSchema):
"""Parser for:
show ip multicast mpls vif
"""
cli_command = 'show ip multicast mpls vif'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
ret_dict = {}
if not out.strip():
return ret_dict
## Lspvif9 0.0.0.0 MDT N/A 11 (vrf vrf3001) 0x1
p1=re.compile(r"(?P<interface>[a-zA-Z0-9]+)\s+(?P<next_hop>\d+\.\d+\.\d+\.\d+)\s+"
"(?P<application>\S+)\s+(?P<ref_count>\S+)\s+(?P<table>\S+)\s+[a-zA-Z\(]*\s*"
"(?P<vrf>[a-z0-9]+)\)*\s+(?P<flags>\S+)")
for line in out.splitlines():
line=line.strip()
## Lspvif9 0.0.0.0 MDT N/A 11 (vrf vrf3001) 0x1
m=p1.match(line)
if m:
r=m.groupdict()
intf_dict=ret_dict.setdefault('interfaces',{}).setdefault(r["interface"],{})
r.pop('interface')
for key,value in r.items():
intf_dict.update({key:int(value) if value.isdigit() else value})
return ret_dict
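# --- Hypothetical usage sketch (not part of the parser package) -------------
# Feeding captured CLI output straight into ShowIpMroute, bypassing a live
# device connection. Passing device=None and calling cli() directly are
# assumptions made for this sketch; normal usage goes through
# device.parse('show ip mroute').
if __name__ == '__main__':
    sample = '''
IP Multicast Routing Table
(*, 239.1.1.1), 00:00:03/stopped, RP 10.4.1.1, flags: SPF
  Incoming interface: Null, RPF nbr 0.0.0.0
  Outgoing interface list:
    Vlan5, Forward/Dense, 00:04:35/00:02:30
'''
    parsed = ShowIpMroute(device=None).cli(vrf='default', output=sample)
    print(parsed['vrf']['default']['address_family']['ipv4']
          ['multicast_group']['239.1.1.1']['source_address']['*'])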
|
the-stack_106_17742
|
from FnAssetAPI.ui.toolkit import QtCore, QtGui, QtWidgets
import FnAssetAPI
import nuke
from . import filters
class KnobChangedAggregator(object):
"""
Nuke currently communicates selection with a knobChanged event, and the
'selected' knob. So we have to keep track of which are selected during a drag
as we don't get any 'list of nodes' type notifications... :(
"""
def __init__(self):
super(KnobChangedAggregator, self).__init__()
self.__selectedNodes = set()
self.__lastSelection = None
def knobChanged(self):
knob = nuke.thisKnob()
if knob.name() != 'selected':
return
node = nuke.thisNode()
# Prune any that were selected but aren't selected any more
toRemove = []
for n in self.__selectedNodes:
if not n['selected'].getValue():
toRemove.append(n)
for t in toRemove:
self.__selectedNodes.remove(t)
# Add in our current selection
if knob.getValue():
self.__selectedNodes.add(node)
else:
if node in self.__selectedNodes:
self.__selectedNodes.remove(node)
self.nodesChanged()
def nodesChanged(self):
entityRefs = set()
for n in self.__selectedNodes:
entityRefs.update(entitiesFromNode(n, asRefs=True))
# Ask anyone else who is interested if they want to contribute
# Copy the list of selected nodes to prevent them messing with it
## @todo Document this
manager = FnAssetAPI.Events.getEventManager()
manager.blockingEvent(True, 'entityReferencesFromNukeNodes',
list(self.__selectedNodes), entityRefs)
if entityRefs != self.__lastSelection:
FnAssetAPI.Events.selectionChanged(list(entityRefs))
self.__lastSelection = entityRefs
__knobChangedAggregator = KnobChangedAggregator()
def registerEvents():
# Hook the event manager up
import FnAssetAPI.Events
manager = FnAssetAPI.Events.getEventManager()
manager.setMainThreadExecFn(nuke.executeInMainThreadWithResult)
manager.run()
global __knobChangedAggregator
for c in ("Read", "Write", "Group"):
nuke.addKnobChanged(__knobChangedAggregator.knobChanged, nodeClass=c)
manager.registerListener(manager.kSelectionChanged, debugSelectionChanged)
# Track the manager changing in a session, so we can persist its ID and
# update menus etc...
manager.registerListener(manager.kManagerChanged, __assetManagerChanged)
def debugSelectionChanged(selection):
FnAssetAPI.logging.debug("Selected Entities: %r" % selection)
def entitiesFromNode(node, asRefs=False):
manager = FnAssetAPI.SessionManager.currentManager()
if not manager:
return []
entities = []
for k in node.knobs().values():
if isinstance(k, nuke.File_Knob):
v = k.getValue()
if manager.isEntityReference(v):
if asRefs:
entities.append(v)
else:
entities.append(manager.getEntity(v))
return entities
## Decorators #################################################################
def ensureManager(function):
def _ensureManager(*args, **kwargs):
session = FnAssetAPI.SessionManager.currentSession()
if not session:
raise RuntimeError("No Asset Management Session")
if not session.currentManager():
raise RuntimeError("No Asset Management Manager selected")
return function(*args, **kwargs)
return _ensureManager
## @name Manager Changed
## @{
def __assetManagerChanged(s, oldId, newId):
# Make sure we save the session settings now we have a new manager
from . import session
session.saveManagerSessionSettings(s)
## @}
def registerFilters():
nuke.addFilenameFilter(filters.assetAPIFilenameFilter)
## @todo Presently, this doesn't work as it gets called after filenameFilter
nuke.addValidateFilename(filters.assetAPIFilenameValidator)
def getSetting(name, default=None):
settings = QtCore.QSettings("uk.co.foundry", "core.asset.nuke")
return settings.value(name, default)
def setSetting(name, value):
settings = QtCore.QSettings("uk.co.foundry", "core.asset.nuke")
settings.setValue(name, value)
def rootNodeAssetKnob(create=True):
assetTab = nuke.root().knobs().get("Assets")
if not assetTab and create:
assetTab = nuke.Tab_Knob("Assets")
nuke.Root().addKnob(assetTab)
return assetTab
def storeRootNodeData(field, value):
assetKnob = rootNodeAssetKnob()
key = 'FnAssetAPI_%s' % field
root = nuke.root()
knob = root.knobs().get(key)
if not knob:
knob = nuke.String_Knob(key, field)
root.addKnob(knob)
knob.setValue(str(value))
def getRootNodeData(field, default=None):
assetKnob = rootNodeAssetKnob()
key = 'FnAssetAPI_%s' % field
root = nuke.root()
knob = root.knobs().get(key)
if not knob:
return default
return knob.value()
__nonPersistentTagData = {}
def getTemporaryRootNodeData(field, default=None):
"""
Version of getRootNodeData for fields that are stored only in memory, and so are
lost when the application closes.
"""
global __nonPersistentTagData
# We don't want to keep a refcount on the root object, not sure if this is
# stable....
obj = id(nuke.root())
objDict = __nonPersistentTagData.get(obj, None)
if not objDict:
return default
return objDict.get(field, default)
def storeTemporaryRootNodeData(field, value):
"""
Version of storeRootNodeData that stores fields only in memory, so they
are lost when the application closes.
"""
global __nonPersistentTagData
# We don't want to keep a refcount on the root object, not sure if this is
# stable....
obj = id(nuke.root())
objDict = __nonPersistentTagData.setdefault(obj, {})
objDict[field] = value
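## Hypothetical usage sketch (illustrative only; it would have to run inside a
## Nuke session because these helpers touch nuke.root()). The field names are
## made up for the example:
##
##     storeRootNodeData('managerId', 'com.example.assetmanager')  # persisted in the script
##     storeTemporaryRootNodeData('sessionToken', 'abc123')        # in-memory only
##     getRootNodeData('managerId')              # -> 'com.example.assetmanager'
##     getTemporaryRootNodeData('sessionToken')  # -> 'abc123'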
|
the-stack_106_17744
|
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 - 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from keystone import exception
from keystone.i18n import _, _LW
from keystone.models import token_model
AUTH_CONTEXT_ENV = 'KEYSTONE_AUTH_CONTEXT'
"""Environment variable used to convey the Keystone auth context.
Auth context is essentially the user credential used for policy enforcement.
It is a dictionary with the following attributes:
* ``user_id``: user ID of the principal
* ``project_id`` (optional): project ID of the scoped project if auth is
project-scoped
* ``domain_id`` (optional): domain ID of the scoped domain if auth is
domain-scoped
* ``roles`` (optional): list of role names for the given scope
* ``group_ids``: list of group IDs for which the API user has membership
"""
LOG = log.getLogger(__name__)
def token_to_auth_context(token):
if not isinstance(token, token_model.KeystoneToken):
raise exception.UnexpectedError(_('token reference must be a '
'KeystoneToken type, got: %s') %
type(token))
auth_context = {'token': token}
try:
auth_context['user_id'] = token.user_id
except KeyError:
LOG.warning(_LW('RBAC: Invalid user data in token'))
raise exception.Unauthorized()
if token.project_scoped:
auth_context['scope'] = 'project'
auth_context['scope_project_id'] = token.project_id
auth_context['scope_domain_id'] = token.project_domain_id
elif token.domain_scoped:
auth_context['scope'] = 'domain'
auth_context['scope_domain_id'] = token.domain_id
else:
LOG.debug('RBAC: Proceeding without project or domain scope')
roles = token.role_names
if roles:
auth_context['roles'] = roles
auth_context['domain_id'] = token.user_domain_id
return auth_context
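# --- Hypothetical illustration (not part of keystone) ------------------------
# The rough shape of the dict built by token_to_auth_context() for a
# project-scoped token, and how a WSGI layer might stash it in the request
# environment under AUTH_CONTEXT_ENV. All values are made up for the example,
# and the real dict also carries the token object itself.
if __name__ == '__main__':
    example_auth_context = {
        'user_id': 'u-123',
        'scope': 'project',
        'scope_project_id': 'p-456',
        'scope_domain_id': 'd-789',
        'roles': ['member'],
        'domain_id': 'd-789',
    }
    environ = {AUTH_CONTEXT_ENV: example_auth_context}
    print(environ[AUTH_CONTEXT_ENV]['user_id'])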
|
the-stack_106_17745
|
# -*- coding: utf-8 -*-
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
#
# Imports
#
import copy
import errno
import itertools
import os
import platform
import signal
import sys
import threading
import time
import warnings
from collections import deque
from functools import partial
from . import cpu_count, get_context
from . import util
from .common import (
TERM_SIGNAL, human_status, pickle_loads, reset_signals, restart_state,
)
from .compat import get_errno, mem_rss, send_offset
from .einfo import ExceptionInfo
from .dummy import DummyProcess
from .exceptions import (
CoroStop,
RestartFreqExceeded,
SoftTimeLimitExceeded,
Terminated,
TimeLimitExceeded,
TimeoutError,
WorkerLostError,
)
from .five import Empty, Queue, range, values, reraise, monotonic
from .util import Finalize, debug, warning
MAXMEM_USED_FMT = """\
child process exiting after exceeding memory limit ({0}KiB / {1}KiB)
"""
PY3 = sys.version_info[0] == 3
if platform.system() == 'Windows': # pragma: no cover
# On Windows os.kill calls TerminateProcess which cannot be
# handled by any process, so this is needed to terminate the task
# *and its children* (if any).
from ._win import kill_processtree as _kill # noqa
SIGKILL = TERM_SIGNAL
else:
from os import kill as _kill # noqa
SIGKILL = signal.SIGKILL
try:
TIMEOUT_MAX = threading.TIMEOUT_MAX
except AttributeError: # pragma: no cover
TIMEOUT_MAX = 1e10 # noqa
if sys.version_info >= (3, 3):
_Semaphore = threading.Semaphore
else:
# Semaphore is a factory function pointing to _Semaphore
_Semaphore = threading._Semaphore # noqa
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Constants representing the state of a job
#
ACK = 0
READY = 1
TASK = 2
NACK = 3
DEATH = 4
#
# Exit code constants
#
EX_OK = 0
EX_FAILURE = 1
EX_RECYCLE = 0x9B
# Signal used for soft time limits.
SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None)
#
# Miscellaneous
#
LOST_WORKER_TIMEOUT = 10.0
EX_OK = getattr(os, "EX_OK", 0)
GUARANTEE_MESSAGE_CONSUMPTION_RETRY_LIMIT = 300
GUARANTEE_MESSAGE_CONSUMPTION_RETRY_INTERVAL = 0.1
job_counter = itertools.count()
Lock = threading.Lock
def _get_send_offset(connection):
try:
native = connection.send_offset
except AttributeError:
native = None
if native is None:
return partial(send_offset, connection.fileno())
return native
def mapstar(args):
return list(map(*args))
def starmapstar(args):
return list(itertools.starmap(args[0], args[1]))
def error(msg, *args, **kwargs):
util.get_logger().error(msg, *args, **kwargs)
def stop_if_not_current(thread, timeout=None):
if thread is not threading.current_thread():
thread.stop(timeout)
class LaxBoundedSemaphore(_Semaphore):
"""Semaphore that checks that # release is <= # acquires,
but ignores if # releases >= value."""
def shrink(self):
self._initial_value -= 1
self.acquire()
if PY3:
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value)
self._initial_value = value
def grow(self):
with self._cond:
self._initial_value += 1
self._value += 1
self._cond.notify()
def release(self):
cond = self._cond
with cond:
if self._value < self._initial_value:
self._value += 1
cond.notify_all()
def clear(self):
while self._value < self._initial_value:
_Semaphore.release(self)
else:
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def grow(self):
cond = self._Semaphore__cond
with cond:
self._initial_value += 1
self._Semaphore__value += 1
cond.notify()
def release(self): # noqa
cond = self._Semaphore__cond
with cond:
if self._Semaphore__value < self._initial_value:
self._Semaphore__value += 1
cond.notifyAll()
def clear(self): # noqa
while self._Semaphore__value < self._initial_value:
_Semaphore.release(self)
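# Hypothetical illustration of the "lax" behaviour above (not executed by the
# pool itself): extra release() calls never push the value past the initial
# bound, while grow()/shrink() adjust that bound at runtime.
#
#     sem = LaxBoundedSemaphore(2)
#     sem.acquire(); sem.release(); sem.release()   # second release is a no-op
#     sem.grow()     # bound becomes 3 and one slot is made available
#     sem.shrink()   # bound back to 2 (acquires a slot)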
#
# Exceptions
#
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self))
def __str__(self):
return "Error sending result: '%r'. Reason: '%r'." % (
self.value, self.exc)
class WorkersJoined(Exception):
"""All workers have terminated."""
def soft_timeout_sighandler(signum, frame):
raise SoftTimeLimitExceeded()
#
# Code run by worker processes
#
class Worker(object):
def __init__(self, inq, outq, synq=None, initializer=None, initargs=(),
maxtasks=None, sentinel=None, on_exit=None,
sigprotection=True, wrap_exception=True,
max_memory_per_child=None, on_ready_counter=None):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
self.initializer = initializer
self.initargs = initargs
self.maxtasks = maxtasks
self.max_memory_per_child = max_memory_per_child
self._shutdown = sentinel
self.on_exit = on_exit
self.sigprotection = sigprotection
self.inq, self.outq, self.synq = inq, outq, synq
self.wrap_exception = wrap_exception # XXX cannot disable yet
self.on_ready_counter = on_ready_counter
self.contribute_to_object(self)
def contribute_to_object(self, obj):
obj.inq, obj.outq, obj.synq = self.inq, self.outq, self.synq
obj.inqW_fd = self.inq._writer.fileno() # inqueue write fd
obj.outqR_fd = self.outq._reader.fileno() # outqueue read fd
if self.synq:
obj.synqR_fd = self.synq._reader.fileno() # synqueue read fd
obj.synqW_fd = self.synq._writer.fileno() # synqueue write fd
obj.send_syn_offset = _get_send_offset(self.synq._writer)
else:
obj.synqR_fd = obj.synqW_fd = obj._send_syn_offset = None
obj._quick_put = self.inq._writer.send
obj._quick_get = self.outq._reader.recv
obj.send_job_offset = _get_send_offset(self.inq._writer)
return obj
def __reduce__(self):
return self.__class__, (
self.inq, self.outq, self.synq, self.initializer,
self.initargs, self.maxtasks, self._shutdown, self.on_exit,
self.sigprotection, self.wrap_exception, self.max_memory_per_child,
)
def __call__(self):
_exit = sys.exit
_exitcode = [None]
def exit(status=None):
_exitcode[0] = status
return _exit(status)
sys.exit = exit
pid = os.getpid()
self._make_child_methods()
self.after_fork()
self.on_loop_start(pid=pid) # callback on loop start
try:
sys.exit(self.workloop(pid=pid))
except Exception as exc:
error('Pool process %r error: %r', self, exc, exc_info=1)
self._do_exit(pid, _exitcode[0], exc)
finally:
self._do_exit(pid, _exitcode[0], None)
def _do_exit(self, pid, exitcode, exc=None):
if exitcode is None:
exitcode = EX_FAILURE if exc else EX_OK
if self.on_exit is not None:
self.on_exit(pid, exitcode)
if sys.platform != 'win32':
try:
self.outq.put((DEATH, (pid, exitcode)))
time.sleep(1)
finally:
os._exit(exitcode)
else:
os._exit(exitcode)
def on_loop_start(self, pid):
pass
def prepare_result(self, result):
return result
def workloop(self, debug=debug, now=monotonic, pid=None):
pid = pid or os.getpid()
put = self.outq.put
inqW_fd = self.inqW_fd
synqW_fd = self.synqW_fd
maxtasks = self.maxtasks
max_memory_per_child = self.max_memory_per_child or 0
prepare_result = self.prepare_result
wait_for_job = self.wait_for_job
_wait_for_syn = self.wait_for_syn
def wait_for_syn(jid):
i = 0
while 1:
if i > 60:
error('!!!WAIT FOR ACK TIMEOUT: job:%r fd:%r!!!',
jid, self.synq._reader.fileno(), exc_info=1)
req = _wait_for_syn()
if req:
type_, args = req
if type_ == NACK:
return False
assert type_ == ACK
return True
i += 1
completed = 0
try:
while maxtasks is None or (maxtasks and completed < maxtasks):
req = wait_for_job()
if req:
type_, args_ = req
assert type_ == TASK
job, i, fun, args, kwargs = args_
put((ACK, (job, i, now(), pid, synqW_fd)))
if _wait_for_syn:
confirm = wait_for_syn(job)
if not confirm:
continue # received NACK
try:
result = (True, prepare_result(fun(*args, **kwargs)))
except Exception:
result = (False, ExceptionInfo())
try:
put((READY, (job, i, result, inqW_fd)))
except Exception as exc:
_, _, tb = sys.exc_info()
try:
wrapped = MaybeEncodingError(exc, result[1])
einfo = ExceptionInfo((
MaybeEncodingError, wrapped, tb,
))
put((READY, (job, i, (False, einfo), inqW_fd)))
finally:
del(tb)
completed += 1
if max_memory_per_child > 0:
used_kb = mem_rss()
if used_kb <= 0:
error('worker unable to determine memory usage')
if used_kb > 0 and used_kb > max_memory_per_child:
warning(MAXMEM_USED_FMT.format(
used_kb, max_memory_per_child))
return EX_RECYCLE
debug('worker exiting after %d tasks', completed)
if maxtasks:
return EX_RECYCLE if completed == maxtasks else EX_FAILURE
return EX_OK
finally:
# Before exiting the worker, we want to ensure that all
# messages produced by the worker have been consumed by the main
# process. This prevents the worker from being terminated prematurely
# and messages being lost.
self._ensure_messages_consumed(completed=completed)
def _ensure_messages_consumed(self, completed):
""" Returns true if all messages sent out have been received and
consumed within a reasonable amount of time """
if not self.on_ready_counter:
return False
for retry in range(GUARANTEE_MESSAGE_CONSUMPTION_RETRY_LIMIT):
if self.on_ready_counter.value >= completed:
debug('ensured messages consumed after %d retries', retry)
return True
time.sleep(GUARANTEE_MESSAGE_CONSUMPTION_RETRY_INTERVAL)
warning('could not ensure all messages were consumed prior to '
'exiting')
return False
def after_fork(self):
if hasattr(self.inq, '_writer'):
self.inq._writer.close()
if hasattr(self.outq, '_reader'):
self.outq._reader.close()
if self.initializer is not None:
self.initializer(*self.initargs)
# Make sure all exiting signals call finally: blocks.
# This is important for the semaphore to be released.
reset_signals(full=self.sigprotection)
# install signal handler for soft timeouts.
if SIG_SOFT_TIMEOUT is not None:
signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler)
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
except AttributeError:
pass
def _make_recv_method(self, conn):
get = conn.get
if hasattr(conn, '_reader'):
_poll = conn._reader.poll
if hasattr(conn, 'get_payload') and conn.get_payload:
get_payload = conn.get_payload
def _recv(timeout, loads=pickle_loads):
return True, loads(get_payload())
else:
def _recv(timeout): # noqa
if _poll(timeout):
return True, get()
return False, None
else:
def _recv(timeout): # noqa
try:
return True, get(timeout=timeout)
except Queue.Empty:
return False, None
return _recv
def _make_child_methods(self, loads=pickle_loads):
self.wait_for_job = self._make_protected_receive(self.inq)
self.wait_for_syn = (self._make_protected_receive(self.synq)
if self.synq else None)
def _make_protected_receive(self, conn):
_receive = self._make_recv_method(conn)
should_shutdown = self._shutdown.is_set if self._shutdown else None
def receive(debug=debug):
if should_shutdown and should_shutdown():
debug('worker got sentinel -- exiting')
raise SystemExit(EX_OK)
try:
ready, req = _receive(1.0)
if not ready:
return None
except (EOFError, IOError) as exc:
if get_errno(exc) == errno.EINTR:
return None # interrupted, maybe by gdb
debug('worker got %s -- exiting', type(exc).__name__)
raise SystemExit(EX_FAILURE)
if req is None:
debug('worker got sentinel -- exiting')
raise SystemExit(EX_FAILURE)
return req
return receive
#
# Class representing a process pool
#
class PoolThread(DummyProcess):
def __init__(self, *args, **kwargs):
DummyProcess.__init__(self)
self._state = RUN
self._was_started = False
self.daemon = True
def run(self):
try:
return self.body()
except RestartFreqExceeded as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
_kill(os.getpid(), TERM_SIGNAL)
sys.exit()
except Exception as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
os._exit(1)
def start(self, *args, **kwargs):
self._was_started = True
super(PoolThread, self).start(*args, **kwargs)
def on_stop_not_started(self):
pass
def stop(self, timeout=None):
if self._was_started:
self.join(timeout)
return
self.on_stop_not_started()
def terminate(self):
self._state = TERMINATE
def close(self):
self._state = CLOSE
class Supervisor(PoolThread):
def __init__(self, pool):
self.pool = pool
super(Supervisor, self).__init__()
def body(self):
debug('worker handler starting')
time.sleep(0.8)
pool = self.pool
try:
# do a burst at startup to verify that we can start
# our pool processes, and in that time we lower
# the max restart frequency.
prev_state = pool.restart_state
pool.restart_state = restart_state(10 * pool._processes, 1)
for _ in range(10):
if self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.1)
# Keep maintaining workers until the cache gets drained, unless
# the pool is terminated
pool.restart_state = prev_state
while self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.8)
except RestartFreqExceeded:
pool.close()
pool.join()
raise
debug('worker handler exiting')
class TaskHandler(PoolThread):
def __init__(self, taskqueue, put, outqueue, pool, cache):
self.taskqueue = taskqueue
self.put = put
self.outqueue = outqueue
self.pool = pool
self.cache = cache
super(TaskHandler, self).__init__()
def body(self):
cache = self.cache
taskqueue = self.taskqueue
put = self.put
for taskseq, set_length in iter(taskqueue.get, None):
task = None
i = -1
try:
for i, task in enumerate(taskseq):
if self._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
except Exception:
job, ind = task[:2]
try:
cache[job]._set(ind, (False, ExceptionInfo()))
except KeyError:
pass
else:
if set_length:
debug('doing set_length()')
set_length(i + 1)
continue
break
except Exception:
job, ind = task[:2] if task else (0, 0)
if job in cache:
cache[job]._set(ind + 1, (False, ExceptionInfo()))
if set_length:
util.debug('doing set_length()')
set_length(i + 1)
else:
debug('task handler got sentinel')
self.tell_others()
def tell_others(self):
outqueue = self.outqueue
put = self.put
pool = self.pool
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
def on_stop_not_started(self):
self.tell_others()
class TimeoutHandler(PoolThread):
def __init__(self, processes, cache, t_soft, t_hard):
self.processes = processes
self.cache = cache
self.t_soft = t_soft
self.t_hard = t_hard
self._it = None
super(TimeoutHandler, self).__init__()
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self.processes)
if proc.pid == pid
), (None, None))
def on_soft_timeout(self, job):
debug('soft time limit exceeded for %r', job)
process, _index = self._process_by_pid(job._worker_pid)
if not process:
return
# Run timeout callback
job.handle_timeout(soft=True)
try:
_kill(job._worker_pid, SIG_SOFT_TIMEOUT)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
def on_hard_timeout(self, job):
if job.ready():
return
debug('hard time limit exceeded for %r', job)
# Remove from cache and set return value to an exception
try:
raise TimeLimitExceeded(job._timeout)
except TimeLimitExceeded:
job._set(job._job, (False, ExceptionInfo()))
else: # pragma: no cover
pass
# Remove from _pool
process, _index = self._process_by_pid(job._worker_pid)
# Run timeout callback
job.handle_timeout(soft=False)
if process:
self._trywaitkill(process)
def _trywaitkill(self, worker):
debug('timeout: sending TERM to %s', worker._name)
try:
if os.getpgid(worker.pid) == worker.pid:
debug("worker %s is a group leader. It is safe to kill (SIGTERM) the whole group", worker.pid)
os.killpg(os.getpgid(worker.pid), signal.SIGTERM)
else:
worker.terminate()
except OSError:
pass
else:
if worker._popen.wait(timeout=0.1):
return
debug('timeout: TERM timed-out, now sending KILL to %s', worker._name)
try:
if os.getpgid(worker.pid) == worker.pid:
debug("worker %s is a group leader. It is safe to kill (SIGKILL) the whole group", worker.pid)
os.killpg(os.getpgid(worker.pid), signal.SIGKILL)
else:
_kill(worker.pid, SIGKILL)
except OSError:
pass
def handle_timeouts(self):
t_hard, t_soft = self.t_hard, self.t_soft
dirty = set()
on_soft_timeout = self.on_soft_timeout
on_hard_timeout = self.on_hard_timeout
def _timed_out(start, timeout):
if not start or not timeout:
return False
if monotonic() >= start + timeout:
return True
# Inner-loop
while self._state == RUN:
# Perform a shallow copy before iteration because keys can change.
# A deep copy fails (on shutdown) due to thread.lock objects.
# https://github.com/celery/billiard/issues/260
cache = copy.copy(self.cache)
# Remove dirty items not in cache anymore
if dirty:
dirty = set(k for k in dirty if k in cache)
for i, job in cache.items():
ack_time = job._time_accepted
soft_timeout = job._soft_timeout
if soft_timeout is None:
soft_timeout = t_soft
hard_timeout = job._timeout
if hard_timeout is None:
hard_timeout = t_hard
if _timed_out(ack_time, hard_timeout):
on_hard_timeout(job)
elif i not in dirty and _timed_out(ack_time, soft_timeout):
on_soft_timeout(job)
dirty.add(i)
yield
def body(self):
while self._state == RUN:
try:
for _ in self.handle_timeouts():
time.sleep(1.0) # don't spin
except CoroStop:
break
debug('timeout handler exiting')
def handle_event(self, *args):
if self._it is None:
self._it = self.handle_timeouts()
try:
next(self._it)
except StopIteration:
self._it = None
class ResultHandler(PoolThread):
def __init__(self, outqueue, get, cache, poll,
join_exited_workers, putlock, restart_state,
check_timeouts, on_job_ready, on_ready_counters=None):
self.outqueue = outqueue
self.get = get
self.cache = cache
self.poll = poll
self.join_exited_workers = join_exited_workers
self.putlock = putlock
self.restart_state = restart_state
self._it = None
self._shutdown_complete = False
self.check_timeouts = check_timeouts
self.on_job_ready = on_job_ready
self.on_ready_counters = on_ready_counters
self._make_methods()
super(ResultHandler, self).__init__()
def on_stop_not_started(self):
# used when pool started without result handler thread.
self.finish_at_shutdown(handle_timeouts=True)
def _make_methods(self):
cache = self.cache
putlock = self.putlock
restart_state = self.restart_state
on_job_ready = self.on_job_ready
def on_ack(job, i, time_accepted, pid, synqW_fd):
restart_state.R = 0
try:
cache[job]._ack(i, time_accepted, pid, synqW_fd)
except (KeyError, AttributeError):
# Object gone or doesn't support _ack (e.g. IMAPIterator).
pass
def on_ready(job, i, obj, inqW_fd):
if on_job_ready is not None:
on_job_ready(job, i, obj, inqW_fd)
try:
item = cache[job]
except KeyError:
return
if self.on_ready_counters:
worker_pid = next(iter(item.worker_pids()), None)
if worker_pid and worker_pid in self.on_ready_counters:
on_ready_counter = self.on_ready_counters[worker_pid]
with on_ready_counter.get_lock():
on_ready_counter.value += 1
if not item.ready():
if putlock is not None:
putlock.release()
try:
item._set(i, obj)
except KeyError:
pass
def on_death(pid, exitcode):
try:
os.kill(pid, TERM_SIGNAL)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
state_handlers = self.state_handlers = {
ACK: on_ack, READY: on_ready, DEATH: on_death
}
def on_state_change(task):
state, args = task
try:
state_handlers[state](*args)
except KeyError:
debug("Unknown job state: %s (args=%s)", state, args)
self.on_state_change = on_state_change
def _process_result(self, timeout=1.0):
poll = self.poll
on_state_change = self.on_state_change
while 1:
try:
ready, task = poll(timeout)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
raise CoroStop()
if self._state:
assert self._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
raise CoroStop()
if ready:
if task is None:
debug('result handler got sentinel')
raise CoroStop()
on_state_change(task)
if timeout != 0: # blocking
break
else:
break
yield
def handle_event(self, fileno=None, events=None):
if self._state == RUN:
if self._it is None:
self._it = self._process_result(0) # non-blocking
try:
next(self._it)
except (StopIteration, CoroStop):
self._it = None
def body(self):
debug('result handler starting')
try:
while self._state == RUN:
try:
for _ in self._process_result(1.0): # blocking
pass
except CoroStop:
break
finally:
self.finish_at_shutdown()
def finish_at_shutdown(self, handle_timeouts=False):
self._shutdown_complete = True
get = self.get
outqueue = self.outqueue
cache = self.cache
poll = self.poll
join_exited_workers = self.join_exited_workers
check_timeouts = self.check_timeouts
on_state_change = self.on_state_change
time_terminate = None
while cache and self._state != TERMINATE:
if check_timeouts is not None:
check_timeouts()
try:
ready, task = poll(1.0)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
return
if ready:
if task is None:
debug('result handler ignoring extra sentinel')
continue
on_state_change(task)
try:
join_exited_workers(shutdown=True)
except WorkersJoined:
now = monotonic()
if not time_terminate:
time_terminate = now
else:
if now - time_terminate > 5.0:
debug('result handler exiting: timed out')
break
debug('result handler: all workers terminated, '
'timeout in %ss',
abs(min(now - time_terminate - 5.0, 0)))
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), self._state)
class Pool(object):
'''
Class which supports an async version of applying functions to arguments.
'''
_wrap_exception = True
Worker = Worker
Supervisor = Supervisor
TaskHandler = TaskHandler
TimeoutHandler = TimeoutHandler
ResultHandler = ResultHandler
SoftTimeLimitExceeded = SoftTimeLimitExceeded
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, timeout=None, soft_timeout=None,
lost_worker_timeout=None,
max_restarts=None, max_restart_freq=1,
on_process_up=None,
on_process_down=None,
on_timeout_set=None,
on_timeout_cancel=None,
threads=True,
semaphore=None,
putlocks=False,
allow_restart=False,
synack=False,
on_process_exit=None,
context=None,
max_memory_per_child=None,
enable_timeouts=False,
**kwargs):
self._ctx = context or get_context()
self.synack = synack
self._setup_queues()
self._taskqueue = Queue()
self._cache = {}
self._state = RUN
self.timeout = timeout
self.soft_timeout = soft_timeout
self._maxtasksperchild = maxtasksperchild
self._max_memory_per_child = max_memory_per_child
self._initializer = initializer
self._initargs = initargs
self._on_process_exit = on_process_exit
self.lost_worker_timeout = lost_worker_timeout or LOST_WORKER_TIMEOUT
self.on_process_up = on_process_up
self.on_process_down = on_process_down
self.on_timeout_set = on_timeout_set
self.on_timeout_cancel = on_timeout_cancel
self.threads = threads
self.readers = {}
self.allow_restart = allow_restart
self.enable_timeouts = bool(
enable_timeouts or
self.timeout is not None or
self.soft_timeout is not None
)
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning(
"Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal.",
))
soft_timeout = None
self._processes = self.cpu_count() if processes is None else processes
self.max_restarts = max_restarts or round(self._processes * 100)
self.restart_state = restart_state(max_restarts, max_restart_freq or 1)
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
if on_process_exit is not None and not callable(on_process_exit):
raise TypeError('on_process_exit must be callable')
self._Process = self._ctx.Process
self._pool = []
self._poolctrl = {}
self._on_ready_counters = {}
self.putlocks = putlocks
self._putlock = semaphore or LaxBoundedSemaphore(self._processes)
for i in range(self._processes):
self._create_worker_process(i)
self._worker_handler = self.Supervisor(self)
if threads:
self._worker_handler.start()
self._task_handler = self.TaskHandler(self._taskqueue,
self._quick_put,
self._outqueue,
self._pool,
self._cache)
if threads:
self._task_handler.start()
self.check_timeouts = None
# Thread killing timedout jobs.
if self.enable_timeouts:
self._timeout_handler = self.TimeoutHandler(
self._pool, self._cache,
self.soft_timeout, self.timeout,
)
self._timeout_handler_mutex = Lock()
self._timeout_handler_started = False
self._start_timeout_handler()
# If running without threads, we need to check for timeouts
# while waiting for unfinished work at shutdown.
if not threads:
self.check_timeouts = self._timeout_handler.handle_event
else:
self._timeout_handler = None
self._timeout_handler_started = False
self._timeout_handler_mutex = None
# Thread processing results in the outqueue.
self._result_handler = self.create_result_handler()
self.handle_result_event = self._result_handler.handle_event
if threads:
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue,
self._pool, self._worker_handler, self._task_handler,
self._result_handler, self._cache,
self._timeout_handler,
self._help_stuff_finish_args()),
exitpriority=15,
)
def Process(self, *args, **kwds):
return self._Process(*args, **kwds)
def WorkerProcess(self, worker):
return worker.contribute_to_object(self.Process(target=worker))
def create_result_handler(self, **extra_kwargs):
return self.ResultHandler(
self._outqueue, self._quick_get, self._cache,
self._poll_result, self._join_exited_workers,
self._putlock, self.restart_state, self.check_timeouts,
self.on_job_ready, on_ready_counters=self._on_ready_counters,
**extra_kwargs
)
def on_job_ready(self, job, i, obj, inqW_fd):
pass
def _help_stuff_finish_args(self):
return self._inqueue, self._task_handler, self._pool
def cpu_count(self):
try:
return cpu_count()
except NotImplementedError:
return 1
def handle_result_event(self, *args):
return self._result_handler.handle_event(*args)
def _process_register_queues(self, worker, queues):
pass
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self._pool)
if proc.pid == pid
), (None, None))
def get_process_queues(self):
return self._inqueue, self._outqueue, None
def _create_worker_process(self, i):
sentinel = self._ctx.Event() if self.allow_restart else None
inq, outq, synq = self.get_process_queues()
on_ready_counter = self._ctx.Value('i')
w = self.WorkerProcess(self.Worker(
inq, outq, synq, self._initializer, self._initargs,
self._maxtasksperchild, sentinel, self._on_process_exit,
# Need to handle all signals if using the ipc semaphore,
# to make sure the semaphore is released.
sigprotection=self.threads,
wrap_exception=self._wrap_exception,
max_memory_per_child=self._max_memory_per_child,
on_ready_counter=on_ready_counter,
))
self._pool.append(w)
self._process_register_queues(w, (inq, outq, synq))
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.index = i
w.start()
self._poolctrl[w.pid] = sentinel
self._on_ready_counters[w.pid] = on_ready_counter
if self.on_process_up:
self.on_process_up(w)
return w
def process_flush_queues(self, worker):
pass
def _join_exited_workers(self, shutdown=False):
"""Cleanup after any worker processes which have exited due to
reaching their specified lifetime. Returns True if any workers were
cleaned up.
"""
now = None
# The worker may have published a result before being terminated,
# but we have no way to accurately tell if it did. So we wait for
# _lost_worker_timeout seconds before we mark the job with
# WorkerLostError.
for job in [job for job in list(self._cache.values())
if not job.ready() and job._worker_lost]:
now = now or monotonic()
lost_time, lost_ret = job._worker_lost
if now - lost_time > job._lost_worker_timeout:
self.mark_as_worker_lost(job, lost_ret)
if shutdown and not len(self._pool):
raise WorkersJoined()
cleaned, exitcodes = {}, {}
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
exitcode = worker.exitcode
popen = worker._popen
if popen is None or exitcode is not None:
# worker exited
debug('Supervisor: cleaning up worker %d', i)
if popen is not None:
worker.join()
                    debug('Supervisor: worker %d joined', i)
cleaned[worker.pid] = worker
exitcodes[worker.pid] = exitcode
if exitcode not in (EX_OK, EX_RECYCLE) and \
not getattr(worker, '_controlled_termination', False):
error(
'Process %r pid:%r exited with %r',
worker.name, worker.pid, human_status(exitcode),
exc_info=0,
)
self.process_flush_queues(worker)
del self._pool[i]
del self._poolctrl[worker.pid]
del self._on_ready_counters[worker.pid]
if cleaned:
all_pids = [w.pid for w in self._pool]
for job in list(self._cache.values()):
acked_by_gone = next(
(pid for pid in job.worker_pids()
if pid in cleaned or pid not in all_pids),
None
)
# already accepted by process
if acked_by_gone:
self.on_job_process_down(job, acked_by_gone)
if not job.ready():
exitcode = exitcodes.get(acked_by_gone) or 0
proc = cleaned.get(acked_by_gone)
if proc and getattr(proc, '_job_terminated', False):
job._set_terminated(exitcode)
else:
self.on_job_process_lost(
job, acked_by_gone, exitcode,
)
else:
# started writing to
write_to = job._write_to
# was scheduled to write to
sched_for = job._scheduled_for
if write_to and not write_to._is_alive():
self.on_job_process_down(job, write_to.pid)
elif sched_for and not sched_for._is_alive():
self.on_job_process_down(job, sched_for.pid)
for worker in values(cleaned):
if self.on_process_down:
if not shutdown:
self._process_cleanup_queues(worker)
self.on_process_down(worker)
return list(exitcodes.values())
return []
def on_partial_read(self, job, worker):
pass
def _process_cleanup_queues(self, worker):
pass
def on_job_process_down(self, job, pid_gone):
pass
def on_job_process_lost(self, job, pid, exitcode):
job._worker_lost = (monotonic(), exitcode)
def mark_as_worker_lost(self, job, exitcode):
try:
raise WorkerLostError(
'Worker exited prematurely: {0}.'.format(
human_status(exitcode)),
)
except WorkerLostError:
job._set(None, (False, ExceptionInfo()))
else: # pragma: no cover
pass
def __enter__(self):
return self
def __exit__(self, *exc_info):
return self.terminate()
def on_grow(self, n):
pass
def on_shrink(self, n):
pass
def shrink(self, n=1):
for i, worker in enumerate(self._iterinactive()):
self._processes -= 1
if self._putlock:
self._putlock.shrink()
worker.terminate_controlled()
self.on_shrink(1)
if i >= n - 1:
break
else:
raise ValueError("Can't shrink pool. All processes busy!")
def grow(self, n=1):
for i in range(n):
self._processes += 1
if self._putlock:
self._putlock.grow()
self.on_grow(n)
def _iterinactive(self):
for worker in self._pool:
if not self._worker_active(worker):
yield worker
def _worker_active(self, worker):
for job in values(self._cache):
if worker.pid in job.worker_pids():
return True
return False
def _repopulate_pool(self, exitcodes):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
if self._state != RUN:
return
try:
if exitcodes and exitcodes[i] not in (EX_OK, EX_RECYCLE):
self.restart_state.step()
except IndexError:
self.restart_state.step()
self._create_worker_process(self._avail_index())
debug('added worker')
def _avail_index(self):
assert len(self._pool) < self._processes
indices = set(p.index for p in self._pool)
return next(i for i in range(self._processes) if i not in indices)
def did_start_ok(self):
return not self._join_exited_workers()
def _maintain_pool(self):
""""Clean up any exited workers and start replacements for them.
"""
joined = self._join_exited_workers()
self._repopulate_pool(joined)
for i in range(len(joined)):
if self._putlock is not None:
self._putlock.release()
def maintain_pool(self):
if self._worker_handler._state == RUN and self._state == RUN:
try:
self._maintain_pool()
except RestartFreqExceeded:
self.close()
self.join()
raise
except OSError as exc:
if get_errno(exc) == errno.ENOMEM:
reraise(MemoryError,
MemoryError(str(exc)),
sys.exc_info()[2])
raise
def _setup_queues(self):
self._inqueue = self._ctx.SimpleQueue()
self._outqueue = self._ctx.SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def _poll_result(timeout):
if self._outqueue._reader.poll(timeout):
return True, self._quick_get()
return False, None
self._poll_result = _poll_result
def _start_timeout_handler(self):
# ensure more than one thread does not start the timeout handler
# thread at once.
if self.threads and self._timeout_handler is not None:
with self._timeout_handler_mutex:
if not self._timeout_handler_started:
self._timeout_handler_started = True
self._timeout_handler.start()
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `func(*args, **kwargs)`.
'''
if self._state == RUN:
return self.apply_async(func, args, kwds).get()
def starmap(self, func, iterable, chunksize=None):
'''
Like `map()` method but the elements of the `iterable` are expected to
be iterables as well and will be unpacked as arguments. Hence
`func` and (a, b) becomes func(a, b).
'''
if self._state == RUN:
return self._map_async(func, iterable,
starmapstar, chunksize).get()
def starmap_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous version of `starmap()` method.
'''
if self._state == RUN:
return self._map_async(func, iterable, starmapstar, chunksize,
callback, error_callback)
def map(self, func, iterable, chunksize=None):
'''
Apply `func` to each element in `iterable`, collecting the results
in a list that is returned.
'''
if self._state == RUN:
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1, lost_worker_timeout=None):
'''
Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
'''
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1,
lost_worker_timeout=None):
'''
Like `imap()` method but ordering of results is arbitrary.
'''
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={},
callback=None, error_callback=None, accept_callback=None,
timeout_callback=None, waitforslot=None,
soft_timeout=None, timeout=None, lost_worker_timeout=None,
callbacks_propagate=(),
correlation_id=None):
'''
Asynchronous equivalent of `apply()` method.
        Callback is called when the function's return value is ready.
        The accept callback is called when the job is accepted to be executed.
        Simplified, the flow is like this:
>>> def apply_async(func, args, kwds, callback, accept_callback):
... if accept_callback:
... accept_callback()
... retval = func(*args, **kwds)
... if callback:
... callback(retval)
'''
if self._state != RUN:
return
soft_timeout = soft_timeout or self.soft_timeout
timeout = timeout or self.timeout
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning(
"Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal.",
))
soft_timeout = None
if self._state == RUN:
waitforslot = self.putlocks if waitforslot is None else waitforslot
if waitforslot and self._putlock is not None:
self._putlock.acquire()
result = ApplyResult(
self._cache, callback, accept_callback, timeout_callback,
error_callback, soft_timeout, timeout, lost_worker_timeout,
on_timeout_set=self.on_timeout_set,
on_timeout_cancel=self.on_timeout_cancel,
callbacks_propagate=callbacks_propagate,
send_ack=self.send_ack if self.synack else None,
correlation_id=correlation_id,
)
if timeout or soft_timeout:
# start the timeout handler thread when required.
self._start_timeout_handler()
if self.threads:
self._taskqueue.put(([(TASK, (result._job, None,
func, args, kwds))], None))
else:
self._quick_put((TASK, (result._job, None, func, args, kwds)))
return result
def send_ack(self, response, job, i, fd):
pass
def terminate_job(self, pid, sig=None):
proc, _ = self._process_by_pid(pid)
if proc is not None:
try:
_kill(pid, sig or TERM_SIGNAL)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
else:
proc._controlled_termination = True
proc._job_terminated = True
def map_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous equivalent of `map()` method.
'''
return self._map_async(
func, iterable, mapstar, chunksize, callback, error_callback,
)
def _map_async(self, func, iterable, mapper, chunksize=None,
callback=None, error_callback=None):
'''
Helper function to implement map, starmap and their async counterparts.
'''
if self._state != RUN:
return
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
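        # Illustrative note (not in the original source): with this heuristic, 100
        # items on a 4-process pool gives divmod(100, 16) == (6, 4), so chunksize
        # becomes 7 and the iterable is split into ceil(100 / 7) == 15 task batches.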
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback,
error_callback=error_callback)
self._taskqueue.put((((TASK, (result._job, i, mapper, (x,), {}))
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
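    # Illustrative note (not in the original source): _get_tasks chunks the input,
    # e.g. Pool._get_tasks(f, range(7), 3) yields (f, (0, 1, 2)), (f, (3, 4, 5)) and
    # finally (f, (6,)); each batch is later unpacked by mapstar/starmapstar.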
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled',
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
if self._putlock:
self._putlock.clear()
self._worker_handler.close()
self._taskqueue.put(None)
stop_if_not_current(self._worker_handler)
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._worker_handler.terminate()
self._terminate()
@staticmethod
def _stop_task_handler(task_handler):
stop_if_not_current(task_handler)
def join(self):
assert self._state in (CLOSE, TERMINATE)
debug('joining worker handler')
stop_if_not_current(self._worker_handler)
debug('joining task handler')
self._stop_task_handler(self._task_handler)
debug('joining result handler')
stop_if_not_current(self._result_handler)
debug('result handler joined')
for i, p in enumerate(self._pool):
debug('joining worker %s/%s (%r)', i + 1, len(self._pool), p)
if p._popen is not None: # process started?
p.join()
debug('pool join complete')
def restart(self):
for e in values(self._poolctrl):
e.set()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, _pool):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _set_result_sentinel(cls, outqueue, pool):
outqueue.put(None)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler,
result_handler, cache, timeout_handler,
help_stuff_finish_args):
# this is guaranteed to only be called once
debug('finalizing pool')
worker_handler.terminate()
task_handler.terminate()
taskqueue.put(None) # sentinel
debug('helping task handler/workers to finish')
cls._help_stuff_finish(*help_stuff_finish_args)
result_handler.terminate()
cls._set_result_sentinel(outqueue, pool)
if timeout_handler is not None:
timeout_handler.terminate()
# Terminate workers which haven't already finished
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p._is_alive():
p.terminate()
debug('joining task handler')
cls._stop_task_handler(task_handler)
debug('joining result handler')
result_handler.stop()
if timeout_handler is not None:
debug('joining timeout handler')
timeout_handler.stop(TIMEOUT_MAX)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d', p.pid)
if p._popen is not None:
p.join()
debug('pool workers joined')
@property
def process_sentinels(self):
return [w._popen.sentinel for w in self._pool]
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
_worker_lost = None
_write_to = None
_scheduled_for = None
def __init__(self, cache, callback, accept_callback=None,
timeout_callback=None, error_callback=None, soft_timeout=None,
timeout=None, lost_worker_timeout=LOST_WORKER_TIMEOUT,
on_timeout_set=None, on_timeout_cancel=None,
callbacks_propagate=(), send_ack=None,
correlation_id=None):
self.correlation_id = correlation_id
self._mutex = Lock()
self._event = threading.Event()
self._job = next(job_counter)
self._cache = cache
self._callback = callback
self._accept_callback = accept_callback
self._error_callback = error_callback
self._timeout_callback = timeout_callback
self._timeout = timeout
self._soft_timeout = soft_timeout
self._lost_worker_timeout = lost_worker_timeout
self._on_timeout_set = on_timeout_set
self._on_timeout_cancel = on_timeout_cancel
self._callbacks_propagate = callbacks_propagate or ()
self._send_ack = send_ack
self._accepted = False
self._cancelled = False
self._worker_pid = None
self._time_accepted = None
self._terminated = None
cache[self._job] = self
def __repr__(self):
        return '<{0}: {id} ack:{ack} ready:{ready}>'.format(
self.__class__.__name__,
id=self._job, ack=self._accepted, ready=self.ready(),
)
def ready(self):
        return self._event.is_set()
def accepted(self):
return self._accepted
def successful(self):
assert self.ready()
return self._success
def _cancel(self):
"""Only works if synack is used."""
self._cancelled = True
def discard(self):
self._cache.pop(self._job, None)
def terminate(self, signum):
self._terminated = signum
def _set_terminated(self, signum=None):
try:
raise Terminated(-(signum or 0))
except Terminated:
self._set(None, (False, ExceptionInfo()))
def worker_pids(self):
return [self._worker_pid] if self._worker_pid else []
def wait(self, timeout=None):
self._event.wait(timeout)
def get(self, timeout=None):
self.wait(timeout)
if not self.ready():
raise TimeoutError
if self._success:
return self._value
else:
raise self._value.exception
def safe_apply_callback(self, fun, *args, **kwargs):
if fun:
try:
fun(*args, **kwargs)
except self._callbacks_propagate:
raise
except Exception as exc:
error('Pool callback raised exception: %r', exc,
exc_info=1)
def handle_timeout(self, soft=False):
if self._timeout_callback is not None:
self.safe_apply_callback(
self._timeout_callback, soft=soft,
timeout=self._soft_timeout if soft else self._timeout,
)
def _set(self, i, obj):
with self._mutex:
if self._on_timeout_cancel:
self._on_timeout_cancel(self)
self._success, self._value = obj
self._event.set()
if self._accepted:
# if not accepted yet, then the set message
# was received before the ack, which means
# the ack will remove the entry.
self._cache.pop(self._job, None)
# apply callbacks last
if self._callback and self._success:
self.safe_apply_callback(
self._callback, self._value)
if (self._value is not None and
self._error_callback and not self._success):
self.safe_apply_callback(
self._error_callback, self._value)
def _ack(self, i, time_accepted, pid, synqW_fd):
with self._mutex:
if self._cancelled and self._send_ack:
self._accepted = True
if synqW_fd:
return self._send_ack(NACK, pid, self._job, synqW_fd)
return
self._accepted = True
self._time_accepted = time_accepted
self._worker_pid = pid
if self.ready():
# ack received after set()
self._cache.pop(self._job, None)
if self._on_timeout_set:
self._on_timeout_set(self, self._soft_timeout, self._timeout)
response = ACK
if self._accept_callback:
try:
self._accept_callback(pid, time_accepted)
            except self._callbacks_propagate:
response = NACK
raise
except Exception:
response = NACK
# ignore other errors
finally:
if self._send_ack and synqW_fd:
return self._send_ack(
response, pid, self._job, synqW_fd
)
if self._send_ack and synqW_fd:
self._send_ack(response, pid, self._job, synqW_fd)
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback, error_callback):
ApplyResult.__init__(
self, cache, callback, error_callback=error_callback,
)
self._success = True
self._length = length
self._value = [None] * length
self._accepted = [False] * length
self._worker_pid = [None] * length
self._time_accepted = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._event.set()
del cache[self._job]
else:
self._number_left = length // chunksize + bool(length % chunksize)
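            # Illustrative note (not in the original source): e.g. length=10 with
            # chunksize=3 gives 10 // 3 + bool(10 % 3) == 4 expected result chunks.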
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i * self._chunksize:(i + 1) * self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
else:
self._success = False
self._value = result
if self._error_callback:
self._error_callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
def _ack(self, i, time_accepted, pid, *args):
start = i * self._chunksize
stop = min((i + 1) * self._chunksize, self._length)
for j in range(start, stop):
self._accepted[j] = True
self._worker_pid[j] = pid
self._time_accepted[j] = time_accepted
if self.ready():
self._cache.pop(self._job, None)
def accepted(self):
return all(self._accepted)
def worker_pids(self):
return [pid for pid in self._worker_pid if pid]
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
_worker_lost = None
def __init__(self, cache, lost_worker_timeout=LOST_WORKER_TIMEOUT):
self._cond = threading.Condition(threading.Lock())
self._job = next(job_counter)
self._cache = cache
self._items = deque()
self._index = 0
self._length = None
self._ready = False
self._unsorted = {}
self._worker_pids = []
self._lost_worker_timeout = lost_worker_timeout
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
with self._cond:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
raise TimeoutError
success, value = item
if success:
return value
raise Exception(value)
__next__ = next # XXX
def _set(self, i, obj):
with self._cond:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
self._ready = True
del self._cache[self._job]
def _set_length(self, length):
with self._cond:
self._length = length
if self._index == self._length:
self._ready = True
self._cond.notify()
del self._cache[self._job]
def _ack(self, i, time_accepted, pid, *args):
self._worker_pids.append(pid)
def ready(self):
return self._ready
def worker_pids(self):
return self._worker_pids
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
with self._cond:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
self._ready = True
del self._cache[self._job]
#
#
#
class ThreadPool(Pool):
from .dummy import Process as DummyProcess
Process = DummyProcess
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue()
self._outqueue = Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
def _poll_result(timeout):
try:
return True, self._quick_get(timeout=timeout)
except Empty:
return False, None
self._poll_result = _poll_result
@staticmethod
def _help_stuff_finish(inqueue, task_handler, pool):
# put sentinels at head of inqueue to make workers finish
with inqueue.not_empty:
inqueue.queue.clear()
inqueue.queue.extend([None] * len(pool))
inqueue.not_empty.notify_all()
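#
# Usage sketch (added for illustration; not part of the original module).
# A minimal, hedged example of how this Pool API is typically driven, assuming the
# module imports cleanly and that `square` is a picklable top-level function (all
# names and values below are hypothetical):
#
#     def square(x):
#         return x * x
#
#     if __name__ == '__main__':
#         pool = Pool(processes=2)
#         async_result = pool.apply_async(square, (3,))
#         print(async_result.get(timeout=10))    # -> 9
#         print(pool.map(square, range(5)))      # -> [0, 1, 4, 9, 16]
#         pool.close()
#         pool.join()
#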
|
the-stack_106_17746
|
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="pyschemagen",
version="0.0.4",
author='alexwhb',
description="A package to generate orator DB schemas from a python dict.",
long_description=long_description,
long_description_content_type="text/markdown",
license='Apache 2.0',
url="https://github.com/alexwhb/PySchemaGen",
packages=['pyschemagen'],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
python_requires='>=3.4',
install_requires=['orator']
)
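# Illustrative note (not part of the original setup.py): with this configuration the
# package is typically built and installed from the project root with `pip install .`;
# install_requires=['orator'] then makes pip pull in orator automatically.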
|
the-stack_106_17747
|
import json
from packlib.base import ProxmoxAction
class NodesNodeStorageStorageRrddataAction(ProxmoxAction):
"""
Read storage RRD statistics.
"""
def run(self, node, storage, timeframe, cf=None, profile_name=None):
super().run(profile_name)
# Only include non None arguments to pass through to proxmox api.
proxmox_kwargs = {}
for api_arg in [
["cf", cf, "string"],
["node", node, "string"],
["storage", storage, "string"],
["timeframe", timeframe, "string"],
]:
if api_arg[1] is None:
continue
if "[n]" in api_arg[0]:
unit_list = json.loads(api_arg[1])
for i, v in enumerate(unit_list):
proxmox_kwargs[api_arg[0].replace("[n]", str(i))] = v
else:
if api_arg[2] == "boolean":
api_arg[1] = int(api_arg[1])
proxmox_kwargs[api_arg[0]] = api_arg[1]
return self.proxmox.get(f"nodes/{node}/storage/{storage}/rrddata", **proxmox_kwargs)
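# Usage sketch (added for illustration; not part of the original module). run() forwards
# only the non-None arguments to the Proxmox API, issuing a GET against
# nodes/<node>/storage/<storage>/rrddata. Assuming the packlib base class can be
# instantiated as shown and that the profile exists, a call might look like this
# (every value below is hypothetical):
#
#     action = NodesNodeStorageStorageRrddataAction()
#     data = action.run(node="pve1", storage="local-lvm", timeframe="hour",
#                       cf="AVERAGE", profile_name="default")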
|
the-stack_106_17748
|
import torch
import logging
import os
import io
from torchtext.utils import download_from_url, extract_archive
from torchtext.vocab import build_vocab_from_iterator
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import Vocab
from torchtext.data.functional import numericalize_tokens_from_iterator
URLS = {
'WikiText2':
'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip',
'WikiText103':
'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip',
'PennTreebank':
['https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt',
'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt',
'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt']
}
class LanguageModelingDataset(torch.utils.data.Dataset):
"""Defines a dataset for language modeling.
Currently, we only support the following datasets:
- WikiText2
- WikiText103
- PennTreebank
"""
def __init__(self, data, vocab):
"""Initiate language modeling dataset.
Arguments:
data: a tensor of tokens. tokens are ids after
numericalizing the string tokens.
torch.tensor([token_id_1, token_id_2, token_id_3, token_id1]).long()
vocab: Vocabulary object used for dataset.
Examples:
>>> from torchtext.vocab import build_vocab_from_iterator
>>> data = torch.tensor([token_id_1, token_id_2,
token_id_3, token_id_1]).long()
>>> vocab = build_vocab_from_iterator([['language', 'modeling']])
>>> dataset = LanguageModelingDataset(data, vocab)
"""
super(LanguageModelingDataset, self).__init__()
self.data = data
self.vocab = vocab
def __getitem__(self, i):
return self.data[i]
def __len__(self):
return len(self.data)
def __iter__(self):
for x in self.data:
yield x
def get_vocab(self):
return self.vocab
def _get_datafile_path(key, extracted_files):
for fname in extracted_files:
if key in fname:
return fname
def _setup_datasets(dataset_name, tokenizer=get_tokenizer("basic_english"),
root='.data', vocab=None, removed_tokens=[],
data_select=('train', 'test', 'valid')):
if isinstance(data_select, str):
data_select = [data_select]
if not set(data_select).issubset(set(('train', 'test', 'valid'))):
raise TypeError('data_select is not supported!')
if dataset_name == 'PennTreebank':
extracted_files = []
select_to_index = {'train': 0, 'test': 1, 'valid': 2}
extracted_files = [download_from_url(URLS['PennTreebank'][select_to_index[key]],
root=root) for key in data_select]
else:
dataset_tar = download_from_url(URLS[dataset_name], root=root)
extracted_files = [os.path.join(root, d) for d in extract_archive(dataset_tar)]
_path = {}
for item in data_select:
_path[item] = _get_datafile_path(item, extracted_files)
if vocab is None:
if 'train' not in _path.keys():
raise TypeError("Must pass a vocab if train is not selected.")
logging.info('Building Vocab based on {}'.format(_path['train']))
txt_iter = iter(tokenizer(row) for row in io.open(_path['train'],
encoding="utf8"))
vocab = build_vocab_from_iterator(txt_iter)
logging.info('Vocab has {} entries'.format(len(vocab)))
else:
if not isinstance(vocab, Vocab):
raise TypeError("Passed vocabulary is not of type Vocab")
data = {}
for item in _path.keys():
data[item] = []
logging.info('Creating {} data'.format(item))
txt_iter = iter(tokenizer(row) for row in io.open(_path[item],
encoding="utf8"))
_iter = numericalize_tokens_from_iterator(
vocab, txt_iter, removed_tokens)
for tokens in _iter:
data[item] += [token_id for token_id in tokens]
for key in data_select:
if data[key] == []:
raise TypeError('Dataset {} is empty!'.format(key))
return tuple(LanguageModelingDataset(torch.tensor(data[d]).long(), vocab)
for d in data_select)
def WikiText2(*args, **kwargs):
""" Defines WikiText2 datasets.
Create language modeling dataset: WikiText2
Separately returns the train/test/valid set
Arguments:
tokenizer: the tokenizer used to preprocess raw text data.
The default one is basic_english tokenizer in fastText. spacy tokenizer
is supported as well (see example below). A custom tokenizer is callable
function with input of a string and output of a token list.
root: Directory where the datasets are saved. Default: ".data"
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
removed_tokens: removed tokens from output dataset (Default: [])
        data_select: a string or tuple for the returned datasets
(Default: ('train', 'test','valid'))
By default, all the three datasets (train, test, valid) are generated. Users
could also choose any one or two of them, for example ('train', 'test') or
just a string 'train'. If 'train' is not in the tuple or string, a vocab
object should be provided which will be used to process valid and/or test
data.
Examples:
>>> from torchtext.datasets import WikiText2
>>> from torchtext.data.utils import get_tokenizer
>>> tokenizer = get_tokenizer("spacy")
>>> train_dataset, test_dataset, valid_dataset = WikiText2(tokenizer=tokenizer)
>>> vocab = train_dataset.get_vocab()
>>> valid_dataset, = WikiText2(tokenizer=tokenizer, vocab=vocab,
data_select='valid')
"""
return _setup_datasets(*(("WikiText2",) + args), **kwargs)
def WikiText103(*args, **kwargs):
""" Defines WikiText103 datasets.
Create language modeling dataset: WikiText103
Separately returns the train/test/valid set
Arguments:
tokenizer: the tokenizer used to preprocess raw text data.
The default one is basic_english tokenizer in fastText. spacy tokenizer
is supported as well (see example below). A custom tokenizer is callable
function with input of a string and output of a token list.
root: Directory where the datasets are saved. Default: ".data"
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
        removed_tokens: removed tokens from output dataset (Default: [])
        data_select: a string or tuple for the returned datasets
            (Default: ('train', 'test','valid'))
            By default, all the three datasets (train, test, valid) are generated. Users
            could also choose any one or two of them, for example ('train', 'test') or
            just a string 'train'. If 'train' is not in the tuple or string, a vocab
            object should be provided which will be used to process valid and/or test
            data.
Examples:
>>> from torchtext.datasets import WikiText103
>>> from torchtext.data.utils import get_tokenizer
>>> tokenizer = get_tokenizer("spacy")
>>> train_dataset, test_dataset, valid_dataset = WikiText103(tokenizer=tokenizer)
>>> vocab = train_dataset.get_vocab()
>>> valid_dataset, = WikiText103(tokenizer=tokenizer, vocab=vocab,
data_select='valid')
"""
return _setup_datasets(*(("WikiText103",) + args), **kwargs)
def PennTreebank(*args, **kwargs):
""" Defines PennTreebank datasets.
Create language modeling dataset: PennTreebank
Separately returns the train/test/valid set
Arguments:
tokenizer: the tokenizer used to preprocess raw text data.
The default one is basic_english tokenizer in fastText. spacy tokenizer
is supported as well (see example below). A custom tokenizer is callable
function with input of a string and output of a token list.
root: Directory where the datasets are saved. Default: ".data"
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
removed_tokens: removed tokens from output dataset (Default: [])
        data_select: a string or tuple for the returned datasets
(Default: ('train', 'test','valid'))
By default, all the three datasets (train, test, valid) are generated. Users
could also choose any one or two of them, for example ('train', 'test') or
just a string 'train'. If 'train' is not in the tuple or string, a vocab
object should be provided which will be used to process valid and/or test
data.
Examples:
>>> from torchtext.datasets import PennTreebank
>>> from torchtext.data.utils import get_tokenizer
>>> tokenizer = get_tokenizer("spacy")
>>> train_dataset, test_dataset, valid_dataset = PennTreebank(tokenizer=tokenizer)
>>> vocab = train_dataset.get_vocab()
>>> valid_dataset, = PennTreebank(tokenizer=tokenizer, vocab=vocab,
data_select='valid')
"""
return _setup_datasets(*(("PennTreebank",) + args), **kwargs)
|
the-stack_106_17749
|
import os
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import graphviz as gv
class HiddenMarkovModel:
def __init__(
self,
observable_states,
hidden_states,
transition_matrix,
emission_matrix,
title="HMM",
):
"""Initialization function for HiddenMarkovModel
Attributes:
observable_states (list): A list containing the name of each observable state.
hidden_states (list): A list containing the name of each hidden state.
transition_matrix (2-D list): A matrix containing the transition probabilities.
emission_matrix (2-D list): A matrix containing the emission probabilities.
title (str): Title for the HMM project. Output files will be named with this attribute.
"""
self.observable_states = observable_states
self.hidden_states = hidden_states
self.transition_matrix = pd.DataFrame(
data=transition_matrix, columns=hidden_states, index=hidden_states
)
self.emission_matrix = pd.DataFrame(
data=emission_matrix, columns=observable_states, index=hidden_states
)
self.pi = self._calculate_stationary_distribution()
self.title = title
def print_model_info(self):
"""Prints the model in a readable manner."""
print("*" * 50)
print(f"Observable States: {self.observable_states}")
print(f"Emission Matrix:\n{self.emission_matrix}")
print(f"Hidden States: {self.hidden_states}")
print(f"Transition Matrix:\n{self.transition_matrix}")
print(f"Initial Probabilities: {self.pi}")
def visualize_model(self, output_dir="outputs", notebook=False):
"""Creates a transition and emission graph of the model.
Args:
output_dir (str): A directory will be created with this name. If the directory already exists then an error will be raised.
notebook (bool): Whether the model should be visualized for a notebook or a script. If False, then a png will be displayed. If True then the output will be displayed in the IPython cell.
"""
try:
os.mkdir(output_dir)
except FileExistsError:
raise FileExistsError(
"Directory already exists! Please provide a different output directory!"
)
output_loc = output_dir + "/" + self.title
G = nx.MultiDiGraph()
G.add_nodes_from(self.hidden_states)
# Get transition probabilities
hidden_edges = self._get_markov_edges(self.transition_matrix)
for (origin, destination), weight in hidden_edges.items():
G.add_edge(origin, destination, weight=weight, label=weight, color="blue")
# Get emission probabilities
emission_edges = self._get_markov_edges(self.emission_matrix)
for (origin, destination), weight in emission_edges.items():
G.add_edge(origin, destination, weight=weight, label=weight, color="red")
# Create graph and draw with edge labels
pos = nx.drawing.nx_pydot.graphviz_layout(G, prog="dot")
edge_labels = {(n1, n2): d["label"] for n1, n2, d in G.edges(data=True)}
nx.drawing.nx_pydot.write_dot(G, output_loc + ".dot")
s = gv.Source.from_file(output_loc + ".dot", format="png")
if notebook:
from IPython.display import display
display(s)
return
s.view()
def forward(self, input_seq):
"""Runs the Forward Algorithm.
Args:
input_seq (list): A list of the observed input sequence.
Returns:
alpha (np.array): A matrix of the alpha values.
probs (numpy.float64): The computed probability of the input sequence.
"""
input_seq = np.array(input_seq)
n_states = len(self.hidden_states)
T = len(input_seq)
# Convert DataFrame to np.array
emission_matrix = self.emission_matrix.values
transition_matrix = self.transition_matrix.values
# Initialize alpha
alpha = np.zeros((n_states, T))
alpha[:, 0] = self.pi * emission_matrix[:, input_seq[0]]
for t in range(1, T):
for s in range(n_states):
alpha[s, t] = emission_matrix[s, input_seq[t]] * np.sum(
alpha[:, t - 1] * transition_matrix[:, s]
)
probs = alpha[:, -1].sum()
return alpha, probs
def backward(self, input_seq):
"""Runs the Backward Algorithm.
Args:
input_seq (list): A list of the observed input sequence.
Returns:
beta (np.array): A matrix of the beta values.
probs (numpy.float64): The computed probability of the input sequence.
"""
input_seq = np.array(input_seq)
n_states = len(self.hidden_states)
T = len(input_seq)
# Convert DataFrame to np.array
emission_matrix = self.emission_matrix.values
transition_matrix = self.transition_matrix.values
# Initialize beta starting from last
beta = np.zeros((n_states, T))
beta[:, T - 1] = 1.0
for t in range(T - 2, -1, -1):
for s in range(n_states):
beta[s, t] = np.sum(
emission_matrix[:, input_seq[t + 1]]
* beta[:, t + 1]
* transition_matrix[s, :]
)
probs = sum(self.pi * emission_matrix[:, input_seq[0]] * beta[:, 0])
return beta, probs
def viterbi(self, input_seq):
"""Runs the Viterbi Algorithm.
Args:
input_seq (list): A list of the observed input sequence.
Returns:
path (np.array): The output path for given input sequence.
delta (np.array): A matrix of the delta values.
phi (numpy.array): A matrix of the phi values.
"""
input_seq = np.array(input_seq)
n_states = len(self.hidden_states)
T = len(input_seq)
# Convert DataFrame to np.array
emission_matrix = self.emission_matrix.values
transition_matrix = self.transition_matrix.values
# Initial blank path
path = np.zeros(T, dtype=int)
# Delta = Highest probability of any path that reaches state i
delta = np.zeros((n_states, T))
# Phi = Argmax by time step for each state
phi = np.zeros((n_states, T))
# Initialize delta
delta[:, 0] = self.pi * emission_matrix[:, input_seq[0]]
print("*" * 50)
print("Starting Forward Walk")
for t in range(1, T):
for s in range(n_states):
delta[s, t] = (
np.max(delta[:, t - 1] * transition_matrix[:, s])
* emission_matrix[s, input_seq[t]]
)
phi[s, t] = np.argmax(delta[:, t - 1] * transition_matrix[:, s])
print(f"State={s} : Sequence={t} | phi[{s}, {t}]={phi[s, t]}")
print("*" * 50)
print("Start Backtrace")
path[T - 1] = np.argmax(delta[:, T - 1])
for t in range(T - 2, -1, -1):
            path[t] = int(phi[path[t + 1], t + 1])
print(f"Path[{t}]={path[t]}")
return path, delta, phi
def _calculate_stationary_distribution(self):
"""Calculates the initial stationary distribution for the model.
Returns:
stationary (np.array): The stationary distribution.
"""
eig_vals, eig_vects = np.linalg.eig(self.transition_matrix.T.values)
_eig_vects = eig_vects[:, np.isclose(eig_vals, 1)]
_eig_vects = _eig_vects[:, 0]
stationary = _eig_vects / _eig_vects.sum()
stationary = stationary.real
return stationary
def _get_markov_edges(self, matrix):
"""Returns the edges between two states.
Args:
matrix (pd.DataFrame): A matrix attribute of the model.
Returns:
edges: A dictionary of the edges between each state.
"""
edges = {}
for col in matrix.columns:
for row in matrix.index:
edges[(row, col)] = matrix.loc[row, col]
return edges
def print_forward_result(alpha, a_prob):
"""Prints the result of the Forward Algorithm.
Args:
alpha (np.array): A matrix of the alpha values.
a_prob (numpy.float64): The computed probability from the alpha values.
"""
print("*" * 50)
print(f"Alpha:\n{alpha}\nProbability of sequence: {a_prob}")
def print_backward_result(beta, b_prob):
"""Prints the result of the Backward Algorithm.
Args:
beta (np.array): A matrix of the beta values.
b_prob (numpy.float64): The computed probability from the beta values.
"""
print("*" * 50)
print(f"Beta:\n{beta}\nProbability of sequence: {b_prob}")
def print_viterbi_result(input_seq, observable_states, hidden_states, path, delta, phi):
"""Prints the result of the Viterbi Algorithm.
Args:
input_seq (list): A list of the observed input sequence.
observable_states (list): A list containing the name of each observable state.
hidden_states (list): A list containing the name of each hidden state.
path (np.array): The output path for given input sequence.
delta (np.array): A matrix of the delta values.
phi (numpy.array): A matrix of the phi values.
"""
print("*" * 50)
print("Viterbi Result")
print(f"Delta:\n{delta}")
print(f"Phi:\n{phi}")
state_path = [hidden_states[p] for p in path]
inv_input_seq = [observable_states[i] for i in input_seq]
print(
f"Result:\n{pd.DataFrame().assign(Observation=inv_input_seq).assign(BestPath=state_path)}"
)
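# Usage sketch (added for illustration; not part of the original module). A tiny
# two-state example; every state name and probability below is hypothetical:
#
#     hmm = HiddenMarkovModel(
#         observable_states=["walk", "shop"],
#         hidden_states=["sunny", "rainy"],
#         transition_matrix=[[0.7, 0.3], [0.4, 0.6]],
#         emission_matrix=[[0.8, 0.2], [0.3, 0.7]],
#         title="weather",
#     )
#     hmm.print_model_info()
#     obs_seq = [0, 1, 0]                          # indices into observable_states
#     alpha, a_prob = hmm.forward(obs_seq)
#     print_forward_result(alpha, a_prob)
#     path, delta, phi = hmm.viterbi(obs_seq)
#     print_viterbi_result(obs_seq, hmm.observable_states, hmm.hidden_states,
#                          path, delta, phi)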
|
the-stack_106_17752
|
import json
import http.client
import csv
#Setup the Parameter of Premiumize and your NAS
params = open('params.csv', "r")
for line in params:
line = line.split(',')
if line[0] == 'synAccName':
synAccName = line[1].rstrip()
elif line[0] == 'synAccPw':
synAccPw = line[1].rstrip()
elif line[0] == 'synIpAddr':
synIpAddr = line[1].rstrip()
elif line[0] == 'premAccID':
premAccID = line[1].rstrip()
elif line[0] == 'premAccPw':
premAccPw = line[1].rstrip()
deletions = True
#Connection Handling
def connection(webAddress, httpType, reqAdress):
conn = http.client.HTTPConnection(webAddress)
conn.request(httpType, reqAdress)
r1 = conn.getresponse()
data = r1.read()
conn.close()
return data
def sConnection(webAddress, httpType, reqAdress):
conn = http.client.HTTPSConnection(webAddress)
conn.request(httpType, reqAdress)
r1 = conn.getresponse()
data = r1.read()
conn.close()
return data
#Creation of Syno Requests
def createSynoRequest(path, api, version, method, param):
return str("/webapi/"+path+"?api="+api+"&version="+version+"&method="+method+"&"+param)
# Step 1 GET API Information
apiInfoRequest = createSynoRequest("query.cgi", "SYNO.API.Info","1", "query", "query=SYNO.API.Auth")
synApiInfo = json.loads(connection(synIpAddr, "GET", apiInfoRequest))
print("Establishing Connection")
#Step 2 Session Login
API_NAME = "SYNO.API.Auth"
CGI_PATH = synApiInfo["data"][API_NAME]["path"]
VERSION = str(synApiInfo["data"][API_NAME]["maxVersion"])
METHOD = "login"
PARAMS = "account="+synAccName+"&passwd="+synAccPw+"&session=DownloadStation&format=cookie"
req = createSynoRequest(CGI_PATH, API_NAME,VERSION, METHOD, PARAMS)
sid = json.loads(connection(synIpAddr, "GET", req))["data"]["sid"]
apiDSTaskRequest = createSynoRequest("query.cgi", "SYNO.API.Info","1", "query", "query=SYNO.DownloadStation.Task")
synDownloadStationConnect = json.loads(connection(synIpAddr, "GET", apiDSTaskRequest))
print("Established Connection")
API_NAME = "SYNO.DownloadStation.Task"
CGI_PATH = synDownloadStationConnect["data"][API_NAME]["path"]
VERSION = str(synDownloadStationConnect["data"][API_NAME]["maxVersion"] -1 )
METHOD = "create"
#Warn for High Limit
premAccountInfo = json.loads(sConnection("www.premiumize.me","GET", "/api/account/info?customer_id="+premAccID+"&pin="+premAccPw))
if premAccountInfo["limit_used"] > 0.9:
print("Download Limit is at 90%")
#optionally exit if the limit is too high
exit()
#Iterate through all Downloads
premTransferList = json.loads(sConnection("www.premiumize.me","GET", "/api/transfer/list?customer_id="+premAccID+"&pin="+premAccPw))
for key in premTransferList["transfers"]:
if key["status"] == "finished":
fileID = key["file_id"]
if fileID == None:
folderID = key["folder_id"]
premFolderList = json.loads(sConnection("www.premiumize.me","GET", "/api/folder/list?id="+folderID+"&customer_id="+premAccID+"&pin="+premAccPw))
content = premFolderList["content"]
for item in content:
if item["type"] == "file":
link = item["link"]
PARAMS = "uri="+link+"&_sid="+sid
apiDownloadReq = createSynoRequest(CGI_PATH, API_NAME,VERSION, METHOD, PARAMS)
connection(synIpAddr, "GET", apiDownloadReq)
else:
continue
else:
premFile = json.loads(sConnection("www.premiumize.me","GET", "/api/item/details?id="+fileID+"&customer_id="+premAccID+"&pin="+premAccPw))
link = premFile["link"]
PARAMS = "uri="+link+"&_sid="+sid
apiDownloadReq = createSynoRequest(CGI_PATH, API_NAME,VERSION, METHOD, PARAMS)
connection(synIpAddr, "GET", apiDownloadReq)
else:
continue
if deletions:
sConnection("www.premiumize.me","POST", "/api/transfer/clearfinished?customer_id="+premAccID+"&pin="+premAccPw)
print("Logging out from "+synIpAddr)
apiLogout = createSynoRequest("auth.cgi", "SYNO.API.Auth","1", "logout", "session=DownloadStation")
data = connection(synIpAddr, "GET", apiLogout)
jsonTable = json.loads(data)
print("All Downloads transfered")
|
the-stack_106_17754
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A distribution contains the meta data for a major grouping of packages
within a :class:Repository, such as all of those used for a major release.
All of the packages in a repo are expected to be compatible with a system,
although some may conflict directly with each other.
A distribution is split into package lists by "component" (a judgement
grouping, normally based on licensing requirements) and "architecture"
(the CPU type that the package was built for).
All combinations of these should have a valid PackageList of packages in
the Repository's Pool.
"""
import zlib
from typing import Optional, List, Dict
from apt import tags
from .exceptions import NonExistentException
from .abstractrepoobject import AbstractRepoObject
from .packagelist import PackageList
_G_ZIPPER = zlib.decompressobj(16 + zlib.MAX_WBITS)
class Distribution(AbstractRepoObject):
"""
A distribution contains the meta data for a major grouping of packages
within a :class:Repository, such as all of those used for a major release.
All of the packages in a repo are expected to be compatible with a system,
although some may conflict directly with each other.
A distribution is split into package lists by "component" (a judgement
grouping, normally based on licensing requirements) and "architecture"
(the CPU type that the package was built for).
All combinations of these should have a valid PackageList of packages in
the Repository's Pool.
"""
distribution: str
_exists: Optional[bool] = None
release_data: Optional[tags.ReleaseFile] = None
_packages: Optional[PackageList] = None
def __init__(self, parent: AbstractRepoObject, name: str):
AbstractRepoObject.__init__(self, parent.repo, parent)
self.distribution = name
def exists(self) -> bool:
"""
Returns whether the distribution currently existing in the repo.
Existing is, in this context, defined as having a parse-able
release file.
If the Repository was created with a GPG context, then the release file
must also have a valid signature (either inline in the InRelease
file, or as part of a Release/Release.gpg file pair)
:return: Whether this distribution exists
"""
if self._exists is not None:
return self._exists
try:
self._exists = bool(self._get_release_file())
except FileNotFoundError:
self._exists = False
return self._exists
def components(self) -> List[str]:
"""
Returns the list of components that are in this Distribution
:return List[str]:
"""
if not self.exists():
raise NonExistentException
return self._get_release_file().components()
def architectures(self) -> List[str]:
"""
Gets the list of architectures that this Distribution supports
:return List[str]: A list of architecture names
"""
if not self.exists():
raise NonExistentException
return self._get_release_file().architectures()
def package_list(self, component: str, architecture: str) -> PackageList:
"""
Gets the package list for a specific component and architecture
in the current distribution.
Information about the packages that are found will also be populated
in the Repository this distribution is from.
:param str component:
:param str architecture:
:return PackageList:
"""
if self._packages is not None:
return self._packages
self._packages = PackageList(self.repo, self)
if not self.exists():
return self._packages
files: Dict[str, tags.FileHash] = self._get_release_file().files
        file_data: Optional[tags.FileHash] = None
for extension, reader in [('.gz', _G_ZIPPER.decompress), ('', Ellipsis)]:
filename = '{}/binary-{}/Packages{}'.format(component, architecture, extension)
if filename in files:
file_data = files[filename]
break
if not file_data:
raise FileNotFoundError()
contents = self._download_file(
['dists', self.distribution, filename],
file_data, reader
)
for package in tags.read_tag_file(contents):
# noinspection PyProtectedMember
# pylint: disable=W0212
imported_package = self.repo._add_package(package, package['Filename'])
self._packages.add(imported_package)
return self._packages
def _get_release_file(self) -> tags.ReleaseFile:
"""
Download and parses the InRelease/Release files for this Repository.
If the Repository has a GPG Context, the signature will also be verified.
In that case, the file "InRelease" is downloaded first, followed by
the "Release" and detached signature "Release.gpg" if the former was
not available.
Without a GPG context, only "Release" is downloaded.
This function will return None if the listed files are not available,
or if the GPG signatures were not verified.
:return apt_tags.ReleaseFile:
"""
if self.release_data:
return self.release_data
if self.repo and self.repo.gpg:
gpg = self.repo.gpg
else:
gpg = None
release_data = None
# If we have a GPG context, attempt to download the InRelease
# inline-signed file and verify that
if gpg:
try:
release_stream = self._open_file(['dists', self.distribution, 'InRelease'])
# Attempt to verify the release file
release_gpg_data = gpg.verify_file(release_stream)
release_stream.close()
if release_gpg_data.valid:
release_data = release_gpg_data.data
# A Not Found here still allows us to fall back to the Release file
except FileNotFoundError:
pass
# If we have no data, either InRelease was wrong, or we have no GPG
if not release_data:
release_stream = self._open_file(['dists', self.distribution, 'Release'])
# If we have GPG context, check the detached signature
# otherwise, just read the data
if gpg:
                signature_stream = self._open_file(['dists', self.distribution, 'Release.gpg'])
                signature_data = signature_stream.read()
                signature_stream.close()
                # Verify the detached signature; as with the inline InRelease case
                # above, assume the GPG context exposes the verified contents via .data.
                release_gpg_data = gpg.verify_file(release_stream, signature_data)
                if release_gpg_data.valid:
                    release_data = release_gpg_data.data
else:
release_data = release_stream.read()
# Make sure the stream gets closed
release_stream.close()
# Parse the data
self.release_data = next(tags.read_tag_file(release_data, tags.ReleaseFile))
return self.release_data
def __repr__(self):
return '<apt.repo.Distribution \'{0.distribution}\' of {0.repo.base_uri}>'.format(self)
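# Usage sketch (added for illustration; not part of the original module). Assuming a
# Repository object from this package is available (its construction is not shown
# here), a Distribution is typically inspected like this; the distribution name and
# component/architecture values are hypothetical:
#
#     dist = Distribution(repository, 'stable')
#     if dist.exists():
#         print(dist.components())        # e.g. ['main', 'contrib']
#         print(dist.architectures())     # e.g. ['amd64', 'i386']
#         packages = dist.package_list('main', 'amd64')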
|
the-stack_106_17755
|
####################
# Import Libraries
####################
import os
import sys
from PIL import Image
import cv2
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.metrics import Accuracy
from pytorch_lightning import loggers
from pytorch_lightning import seed_everything
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import StratifiedKFold
from sklearn import model_selection
import albumentations as A
import timm
from omegaconf import OmegaConf
import glob
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from nnAudio.Spectrogram import CQT1992v2, CQT2010v2
from scipy import signal
import pycbc
from pycbc.filter import highpass_fir, lowpass_fir
from scipy import signal
####################
# Utils
####################
def get_score(y_true, y_pred):
score = roc_auc_score(y_true, y_pred)
return score
def load_pytorch_model(ckpt_name, model, ignore_suffix='model'):
state_dict = torch.load(ckpt_name, map_location='cpu')["state_dict"]
new_state_dict = {}
for k, v in state_dict.items():
name = k
if name.startswith(str(ignore_suffix)+"."):
name = name.replace(str(ignore_suffix)+".", "", 1) # remove `model.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict, strict=False)
return model
def filt(waves):
#window = signal.tukey(4096,0.1)
waves = [pycbc.filter.resample.highpass_fir(pycbc.types.TimeSeries(w, epoch=0, delta_t=1.0/2048), frequency=20, order=100) for w in waves]
waves = [pycbc.filter.resample.notch_fir(w, f1=30, f2=80, order=10, beta=5) for w in waves]
waves = [pycbc.filter.resample.lowpass_fir(w, frequency=512, order=5) for w in waves]
waves = np.array([np.array(w) for w in waves])
#waves = np.array([np.array(w)*window for w in waves])
return waves
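# Illustrative note (not part of the original script): filt() passes each of the three
# detector strains through a 20 Hz FIR highpass, a 30-80 Hz notch and a 512 Hz lowpass
# before the constant-Q transforms below; the commented-out Tukey window is an
# alternative tapering step that is currently disabled.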
####################
# Config
####################
conf_dict = {'batch_size': 8,#32,
'epoch': 30,
'height': 512,#640,
'width': 512,
'model_name': 'efficientnet_b0',
'lr': 0.001,
'fold': 0,
'drop_rate': 0.0,
'drop_path_rate': 0.0,
'data_dir': '../input/seti-breakthrough-listen',
'model_path': None,
'output_dir': './',
'snap': 1,
'fmin': 10,
'fmax': 512,
'scale': 1.5}
conf_base = OmegaConf.create(conf_dict)
####################
# Dataset
####################
class G2NetDataset(Dataset):
def __init__(self, df, transform=None, conf=None, train=True):
self.df = df.reset_index(drop=True)
self.dir_names = df['dir'].values
self.labels = df['target'].values
self.wave_transform = [
CQT1992v2(sr=2048, fmin=conf.fmin, fmax=conf.fmax, hop_length=8, bins_per_octave=8, window='flattop', filter_scale=conf.scale),
CQT1992v2(sr=2048, fmin=conf.fmin, fmax=conf.fmax, hop_length=8, bins_per_octave=8, window='blackmanharris', filter_scale=conf.scale),
CQT1992v2(sr=2048, fmin=conf.fmin, fmax=conf.fmax, hop_length=8, bins_per_octave=8, window='nuttall', filter_scale=conf.scale)]
#self.wave_transform = CQT1992v2(sr=2048, fmin=10, fmax=1024, hop_length=8, bins_per_octave=8, window='flattop')
#self.wave_transform = CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=1, bins_per_octave=14, window='flattop')
#self.wave_transform = CQT2010v2(sr=2048, fmin=10, fmax=1024, hop_length=32, n_bins=32, bins_per_octave=8, window='flattop')
self.stat = [
[0.013205823003608798,0.037445450696502146],
[0.009606230606511236,0.02489221471650526], # 10000 sample
[0.009523397709568962,0.024628402379527688],] # 10000 sample
        # It might be worth trying different hop lengths.
self.transform = transform
self.conf = conf
self.train = train
def __len__(self):
return len(self.df)
def apply_qtransform(self, waves, transform):
#print(waves.shape)
#waves = np.hstack(waves)
#print(np.max(np.abs(waves), axis=1))
#waves = waves / np.max(np.abs(waves), axis=1, keepdims=True)
#waves = waves / np.max(waves)
waves = waves / 4.6152116213830774e-20
waves = torch.from_numpy(waves).float()
image = transform(waves)
return image
def __getitem__(self, idx):
img_id = self.df.loc[idx, 'id']
file_path = os.path.join(self.dir_names[idx],"{}/{}/{}/{}.npy".format(img_id[0], img_id[1], img_id[2], img_id))
waves = np.load(file_path)
waves = filt(waves)
label = torch.tensor([self.labels[idx]]).float()
image1 = self.apply_qtransform(waves, self.wave_transform[0])
image1 = image1.squeeze().numpy().transpose(1,2,0)
image1 = cv2.vconcat([image1[:,:,0],image1[:,:,1],image1[:,:,2]])
image1 = (image1-self.stat[0][0])/self.stat[0][1]
image1 = cv2.resize(image1, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image2 = self.apply_qtransform(waves, self.wave_transform[1])
image2 = image2.squeeze().numpy().transpose(1,2,0)
image2 = cv2.vconcat([image2[:,:,0],image2[:,:,1],image2[:,:,2]])
image2 = (image2-self.stat[1][0])/self.stat[1][1]
image2 = cv2.resize(image2, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image3 = self.apply_qtransform(waves, self.wave_transform[2])
image3 = image3.squeeze().numpy().transpose(1,2,0)
image3 = cv2.vconcat([image3[:,:,0],image3[:,:,1],image3[:,:,2]])
image3 = (image3-self.stat[2][0])/self.stat[2][1]
image3 = cv2.resize(image3, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
#if self.transform is not None:
# image = self.transform(image=image)['image']
image1 = torch.from_numpy(image1).unsqueeze(dim=0)
image2 = torch.from_numpy(image2).unsqueeze(dim=0)
image3 = torch.from_numpy(image3).unsqueeze(dim=0)
return image1, image2, image3, label
####################
# Data Module
####################
class SETIDataModule(pl.LightningDataModule):
def __init__(self, conf):
super().__init__()
self.conf = conf
# OPTIONAL, called only on 1 GPU/machine(for download or tokenize)
def prepare_data(self):
pass
# OPTIONAL, called for every GPU/machine
def setup(self, stage=None):
if stage == 'test':
test_df = pd.read_csv(os.path.join(self.conf.data_dir, "sample_submission.csv"))
test_df['dir'] = os.path.join(self.conf.data_dir, "test")
self.test_dataset = G2NetDataset(test_df, transform=None,conf=self.conf, train=False)
# ====================================================
# Inference function
# ====================================================
def inference(models, test_loader):
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
raw_probs = [[] for i in range(len(models))]
probs = []
probs_flattop = []
probs_blackmanharris = []
probs_nuttall = []
with torch.no_grad():
for i, (images) in tk0:
images1 = images[0].cuda()
images2 = images[1].cuda()
images3 = images[2].cuda()
avg_preds = []
flattop = []
blackmanharris = []
nuttall = []
for mid, model in enumerate(models):
y_preds_1 = model(images1)
y_preds_2 = model(images2)
y_preds_3 = model(images3)
y_preds = (y_preds_1 + y_preds_2 + y_preds_3)/3
avg_preds.append(y_preds.sigmoid().to('cpu').numpy())
flattop.append(y_preds_1.sigmoid().to('cpu').numpy())
blackmanharris.append(y_preds_2.sigmoid().to('cpu').numpy())
nuttall.append(y_preds_3.sigmoid().to('cpu').numpy())
raw_probs[mid].append(y_preds.sigmoid().to('cpu').numpy())
avg_preds = np.mean(avg_preds, axis=0)
flattop = np.mean(flattop, axis=0)
blackmanharris = np.mean(blackmanharris, axis=0)
nuttall = np.mean(nuttall, axis=0)
probs.append(avg_preds)
probs_flattop.append(flattop)
probs_blackmanharris.append(blackmanharris)
probs_nuttall.append(nuttall)
for mid in range(len(models)):
raw_probs[mid] = np.concatenate(raw_probs[mid])
probs = np.concatenate(probs)
probs_flattop = np.concatenate(probs_flattop)
probs_blackmanharris = np.concatenate(probs_blackmanharris)
probs_nuttall = np.concatenate(probs_nuttall)
return probs, probs_flattop, probs_blackmanharris, probs_nuttall, raw_probs
####################
# Train
####################
def main():
conf_cli = OmegaConf.from_cli()
conf = OmegaConf.merge(conf_base, conf_cli)
print(OmegaConf.to_yaml(conf))
seed_everything(2021)
# get model path
model_path = []
for i in range(5):
#if i == 4:
#model_path.append('/kqi/parent/22021886/fold3_0/ckpt/fold3-epoch=18-val_score=0.91562.ckpt')
# continue
#if i == 3:
# continue
#for j in range(conf.snap):
target_model = glob.glob(os.path.join(conf.model_dir, f'fold{i}/ckpt/*epoch*.ckpt'))
        scores = [float(os.path.splitext(os.path.basename(p))[0].split('=')[-1]) for p in target_model]
model_path.append(target_model[scores.index(max(scores))])
models = []
for ckpt in model_path:
m = timm.create_model(model_name=conf.model_name, num_classes=1, pretrained=False, in_chans=1)
m = load_pytorch_model(ckpt, m, ignore_suffix='model')
m.cuda()
m.eval()
models.append(m)
data_module = SETIDataModule(conf)
data_module.setup(stage='test')
test_dataset = data_module.test_dataset
test_loader = DataLoader(test_dataset, batch_size=conf.batch_size, num_workers=4, shuffle=False, pin_memory=True, drop_last=False)
predictions, probs_flattop, probs_blackmanharris, probs_nuttall, raw_probs = inference(models, test_loader)
test = pd.read_csv(os.path.join(conf.data_dir, "sample_submission.csv"))
for mid, rp in enumerate(raw_probs):
test['target'] = rp
test[['id', 'target']].to_csv(os.path.join(conf.output_dir, f'pseudo_fold{mid}.csv'), index=False)
test['target'] = predictions
test[['id', 'target']].to_csv(os.path.join(conf.output_dir, "submission.csv"), index=False)
test['target'] = probs_flattop
test[['id', 'target']].to_csv(os.path.join(conf.output_dir, "submission_flattop.csv"), index=False)
test['target'] = probs_blackmanharris
test[['id', 'target']].to_csv(os.path.join(conf.output_dir, "submission_blackmanharris.csv"), index=False)
test['target'] = probs_nuttall
test[['id', 'target']].to_csv(os.path.join(conf.output_dir, "submission_nuttall.csv"), index=False)
print(test[['id', 'target']].head())
print(model_path)
if __name__ == "__main__":
main()
|
the-stack_106_17760
|
# coding: utf8
import logging
import os
import subprocess
from kalliope.core.Utils.FileManager import FileManager
logging.basicConfig()
logger = logging.getLogger("kalliope")
class PlayerModule(object):
"""
Mother class of Players.
Ability to convert mp3 to wave format.
"""
def __init__(self, **kwargs):
# set parameter from what we receive from the settings
self.convert = kwargs.get('convert_to_wav', True)
@staticmethod
def convert_mp3_to_wav(file_path_mp3):
"""
        PyAudio, AlsaPlayer and sounddevice do not support MP3 files,
        so MP3 files must be converted to wave format before being played.
        This function assumes avconv is available on the system
:param file_path_mp3: the file path to convert from mp3 to wav
"""
logger.debug("Converting mp3 file to wav file: %s" % file_path_mp3)
fnull = open(os.devnull, 'w')
# temp file
tmp_file_wav = file_path_mp3 + ".wav"
# Convert mp3 to wave
subprocess.call(['avconv', '-y', '-i', file_path_mp3, tmp_file_wav],
stdout=fnull, stderr=fnull)
# remove the original file
FileManager.remove_file(file_path_mp3)
# rename the temp file with the same name as the original file
os.rename(tmp_file_wav, file_path_mp3)
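# A minimal usage sketch (the path below is a placeholder, not a real file):
# after the call, the file at that path contains wave data but keeps its
# original ".mp3" name, as expected by the os.rename above.
#
#   PlayerModule.convert_mp3_to_wav("/tmp/kalliope_tts.mp3")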
|
the-stack_106_17761
|
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listsincelast RPC."""
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import assert_equal, assert_array_result, assert_raises_rpc_error
class ListSinceBlockTest (DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[2].generate(101)
self.sync_all()
self.test_no_blockhash()
self.test_invalid_blockhash()
self.test_reorg()
self.test_double_spend()
self.test_double_send()
def test_no_blockhash(self):
txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
blockhash, = self.nodes[2].generate(1)
self.sync_all()
txs = self.nodes[0].listtransactions()
assert_array_result(txs, {"txid": txid}, {
"category": "receive",
"amount": 1,
"blockhash": blockhash,
"confirmations": 1,
})
assert_equal(
self.nodes[0].listsinceblock(),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
assert_equal(
self.nodes[0].listsinceblock(""),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
def test_invalid_blockhash(self):
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"0000000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"invalid-hex")
def test_reorg(self):
'''
`listsinceblock` did not behave correctly when handed a block that was
no longer in the main chain:
ab0
/ \
aa1 [tx0] bb1
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Consider a client that has only seen block `aa3` above. It asks the node
to `listsinceblock aa3`. But at some point prior the main chain switched
to the bb chain.
Previously: listsinceblock would find height=4 for block aa3 and compare
this to height=5 for the tip of the chain (bb4). It would then return
results restricted to bb3-bb4.
Now: listsinceblock finds the fork at ab0 and returns results in the
range bb1-bb4.
This test only checks that [tx0] is present.
'''
# Split network into two
self.split_network()
# send to nodes[0] from nodes[2]
senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# generate on both sides
lastblockhash = self.nodes[1].generate(6)[5]
self.nodes[2].generate(7)
self.log.info('lastblockhash=%s' % (lastblockhash))
self.sync_all([self.nodes[:2], self.nodes[2:]])
self.join_network()
# listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
found = False
for tx in lsbres['transactions']:
if tx['txid'] == senttx:
found = True
break
assert found
def test_double_spend(self):
'''
This tests the case where the same UTXO is spent twice on two separate
blocks as part of a reorg.
ab0
/ \
aa1 [tx1] bb1 [tx2]
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Problematic case:
1. User 1 receives DGB in tx1 from utxo1 in block aa1.
2. User 2 receives DGB in tx2 from utxo1 (same) in block bb1
3. User 1 sees 2 confirmations at block aa3.
4. Reorg into bb chain.
5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now
invalidated.
Currently the solution to this is to detect that a reorg'd block is
asked for in listsinceblock, and to iterate back over existing blocks up
until the fork point, and to include all transactions that relate to the
node wallet.
'''
self.sync_all()
# Split network into two
self.split_network()
# share utxo between nodes[1] and nodes[2]
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
privkey = self.nodes[2].dumpprivkey(utxo['address'])
self.nodes[1].importprivkey(privkey)
# send from nodes[1] using utxo to nodes[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipient_dict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[1].getnewaddress(): change,
}
utxo_dicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
txid1 = self.nodes[1].sendrawtransaction(
self.nodes[1].signrawtransactionwithwallet(
self.nodes[1].createrawtransaction(utxo_dicts, recipient_dict))['hex'])
# send from nodes[2] using utxo to nodes[3]
recipient_dict2 = {
self.nodes[3].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
self.nodes[2].sendrawtransaction(
self.nodes[2].signrawtransactionwithwallet(
self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict2))['hex'])
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(4)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1"
# listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# but it should not include 'removed' if include_removed=false
lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False)
assert 'removed' not in lsbres2
def test_double_send(self):
'''
This tests the case where the same transaction is submitted twice on two
separate blocks as part of a reorg. The former will vanish and the
latter will appear as the true transaction (with confirmations dropping
as a result).
ab0
/ \
aa1 [tx1] bb1
| |
aa2 bb2
| |
aa3 bb3 [tx1]
|
bb4
Asserted:
1. tx1 is listed in listsinceblock.
2. It is included in 'removed' as it was removed, even though it is now
present in a different block.
3. It is listed with a confirmation count of 2 (bb3, bb4), not
3 (aa1, aa2, aa3).
'''
self.sync_all()
# Split network into two
self.split_network()
# create and sign a transaction
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipient_dict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
utxo_dicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
signedtxres = self.nodes[2].signrawtransactionwithwallet(
self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict))
assert signedtxres['complete']
signedtx = signedtxres['hex']
# send from nodes[1]; this will end up in aa1
txid1 = self.nodes[1].sendrawtransaction(signedtx)
# generate bb1-bb2 on right side
self.nodes[2].generate(2)
# send from nodes[2]; this will end up in bb3
txid2 = self.nodes[2].sendrawtransaction(signedtx)
assert_equal(txid1, txid2)
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(2)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
self.nodes[0].gettransaction(txid1)
# listsinceblock(lastblockhash) should now include txid1 in transactions
# as well as in removed
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['transactions'])
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# find transaction and ensure confirmations is valid
for tx in lsbres['transactions']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
# the same check for the removed array; confirmations should STILL be 2
for tx in lsbres['removed']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
if __name__ == '__main__':
ListSinceBlockTest().main()
|
the-stack_106_17763
|
from __future__ import absolute_import
from __future__ import print_function
from dpark.serialize import load_func, dump_func
import sys
import operator
from six.moves import range
if sys.version_info[0] < 3:
def next_func(it):
return it.next
else:
def next_func(it):
return it.__next__
class HeapOnKey(object):
def __init__(self, key=None, min_heap=False):
self.key = key
self.min_heap = min_heap
self._setup_cmp()
def _setup_cmp(self):
key = self.key
min_heap = self.min_heap
def _ge0(x, y):
return not (x < y)
def _lt(x, y):
return key(x) < key(y)
def _ge(x, y):
return not (key(x) < key(y))
if key is None:
self.cmp_lt = operator.lt if min_heap else _ge0
else:
self.cmp_lt = _lt if min_heap else _ge
def __getstate__(self):
return dump_func(self.key), self.min_heap
def __setstate__(self, state):
key_f, self.min_heap = state
self.key = load_func(key_f)
self._setup_cmp()
def push(self, heap, item):
heap.append(item)
self._sift_down(heap, 0, len(heap) - 1)
def pop(self, heap):
last_item = heap.pop()
if heap:
ret_item = heap[0]
heap[0] = last_item
self._sift_up(heap, 0)
else:
ret_item = last_item
return ret_item
def push_pop(self, heap, item):
if heap and self.cmp_lt(heap[0], item):
item, heap[0] = heap[0], item
self._sift_up(heap, 0)
return item
def heapify(self, heap):
n = len(heap)
for i in range(n // 2 - 1, -1, -1):
self._sift_up(heap, i)
def _sift_down(self, heap, start_pos, pos):
new_item = heap[pos]
cmp_lt = self.cmp_lt
while pos > start_pos:
parent_pos = (pos - 1) >> 1
parent = heap[parent_pos]
if cmp_lt(new_item, parent):
heap[pos] = parent
pos = parent_pos
continue
break
heap[pos] = new_item
def _sift_up(self, heap, pos):
end_pos = len(heap)
child_pos = 2 * pos + 1
cmp_lt = self.cmp_lt
while child_pos < end_pos:
right_pos = child_pos + 1
if right_pos < end_pos and not cmp_lt(heap[child_pos], heap[right_pos]):
child_pos = right_pos
if cmp_lt(heap[pos], heap[child_pos]):
break
heap[pos], heap[child_pos] = heap[child_pos], heap[pos]
pos = child_pos
child_pos = 2 * pos + 1
def replace(self, heap, item):
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
self._sift_up(heap, 0)
return returnitem
def merge(self, iterables, ordered_iters=0):
"""iterables: each is sorted
ordered_iters: when come to equal value, the element in the first iter yields
first(last) if ordered_iters >(<) 0
not stable if ordered_iters == 0
"""
if not ordered_iters:
def key(x):
return self.key(x[0])
else:
def key(x):
return self.key(x[0]), x[1]
heap = HeapOnKey(key, self.min_heap)
_heappop, _heapreplace, _StopIteration = heap.pop, heap.replace, StopIteration
_len = len
h = []
h_append = h.append
order = -1 if ordered_iters and ((ordered_iters > 0) ^ self.min_heap) else 1
for it_idx, it in enumerate(map(iter, iterables)):
try:
_next = next_func(it)
h_append([_next(), order * it_idx, _next])
except _StopIteration:
pass
heap.heapify(h)
while _len(h) > 1:
try:
while 1:
v, _, _next = s = h[0]
yield v
s[0] = _next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
v, _, _next = h[0]
yield v
for v in _next.__self__:
yield v
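# A minimal usage sketch for HeapOnKey.merge() (illustrative only, not part of
# dpark): merging two pre-sorted runs by their first element, with ties kept
# in input-iterable order. The names `h`, `runs` and `merged` are placeholders.
#
#   h = HeapOnKey(key=lambda pair: pair[0], min_heap=True)
#   runs = [[(1, 'a'), (3, 'a')], [(1, 'b'), (2, 'b')]]
#   merged = list(h.merge(runs, ordered_iters=1))
#   # -> [(1, 'a'), (1, 'b'), (2, 'b'), (3, 'a')]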
def test():
lst = [10, 9, 20, 18, 3, 24, 29, 39]
h = HeapOnKey()
h.heapify(lst)
import sys
print('the list after heapify:', lst, file=sys.stderr)
lst.pop()
lst[0] = 12
h._sift_up(lst, 0)
print('the list after sift up:', lst, file=sys.stderr)
h.push(lst, 8)
print('the list after push:', lst, file=sys.stderr)
ret = h.pop(lst)
print('the list after pop:', lst, ' with value:', ret, file=sys.stderr)
h = HeapOnKey(min_heap=True)
h.heapify(lst)
h.push(lst, 12)
print('the list after reverse:', lst, file=sys.stderr)
class Foo:
def __init__(self, name='', age=0, score=None):
self.name = name
self.age = age
self.score = score
def __getstate__(self):
return self.name, self.age, self.score
def __setstate__(self, state):
self.name, self.age, self.score = state
def __repr__(self):
return '(name:' + self.name + ', age:' + str(self.age) + ', score(%d, %d, %d))' % self.score
def key_func(foo):
return foo.age
lst = [Foo('aaa', 10, (60, 89, 70)), Foo('bbb', 20, (78, 75, 60)),
Foo('ccc', 15, (60, 73, 84)), Foo('ddd', 21, (87, 64, 65)),
Foo('eee', 30, (54, 53, 79)), Foo('fff', 10, (87, 73, 98))]
h = HeapOnKey(key=key_func, min_heap=True)
h.heapify(lst)
print('the list after heapify:', lst, file=sys.stderr)
len_l = 100000
top_n = 10
lst = []
import random
for i in range(len_l):
lst.append(random.randint(1, 2 * len_l + 1))
top_l = []
call_cnt = 0
    def cnt_key(x):
        # call_cnt is defined in the enclosing test() scope, so nonlocal
        # (not global) is required for the increment to work
        nonlocal call_cnt
        call_cnt += 1
        return x
h = HeapOnKey(key=cnt_key)
import time
start = time.time()
for i in lst:
if len(top_l) >= top_n:
h.push_pop(top_l, i)
else:
h.push(top_l, i)
top_l.sort(key=cnt_key)
print('after heap:', top_l, ' with elapsed:', time.time() - start, ' with cnt:', call_cnt / 2, file=sys.stderr)
call_cnt = 0
start = time.time()
top_l = []
for i in lst:
top_l.append(i)
top_l.sort(key=cnt_key)
if len(top_l) > top_n:
top_l.pop()
print('after sort:', top_l, ' with elapsed:', time.time() - start, ' with cnt:', call_cnt / 2, file=sys.stderr)
if __name__ == '__main__':
test()
|
the-stack_106_17764
|
# -*- coding: utf-8 -*-
import json
from math import sqrt
from typing import List
from flask import Flask, request, Response, jsonify
from flask_cors import CORS
from sliding_puzzle import Puzzle, TypePuzzle
from sliding_puzzle.algorithm import get_algorithm
app = Flask(__name__)
application = app
CORS(application)
def all_diff(solutions: List[Puzzle]) -> List[int]:
"""Allows you to collect all the pieces to move to solve the puzzle
:param solutions: a list of puzzle sorted in the order of the solution.
The first element is the basic puzzle, the last is the solution
:type solutions: List[Puzzle]
:return: a list of integers where each integer corresponds to the number of the box to be moved
:rtype: List[int]
"""
def one_diff(puzzle1: Puzzle, puzzle2: Puzzle) -> int:
"""Return the cell that has changed between puzzle1 and puzzle2
We retrieve the index of zero in the second puzzle, then we look at the number of this index in the first puzzle
:param puzzle1: First Puzzle
:type puzzle1: Puzzle
:param puzzle2: Second Puzzle. This must be the first puzzle with one more move
:type puzzle2: Puzzle
:return: the element that moved between the first and the second puzzle
:rtype: int
"""
new_pos = puzzle2.get_index(0)
return puzzle1.tiles[new_pos[0]][new_pos[1]]
i = 0
result = []
while i < len(solutions) - 1:
result.append(one_diff(solutions[i], solutions[i + 1]))
i += 1
return result
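# A minimal sketch of what all_diff() produces (illustrative only; it assumes
# Puzzle accepts a plain list of rows, as it is used in main() below):
#
#   before = Puzzle([[1, 2, 3], [4, 5, 6], [7, 0, 8]])
#   after = Puzzle([[1, 2, 3], [4, 5, 6], [7, 8, 0]])
#   all_diff([before, after])  # -> [8], i.e. slide tile 8 into the blank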
@app.route("/solve", methods=["POST"])
def main():
"""Web service allowing a Client to interact with the sliding_puzzle application
The client (here the Flutter application) must make an HTTP POST request and put in the body ::
- tiles: the list of the puzzle. For example [[1, 2, 3], [4, 5, 6], [7, 8, 0]] (in string or int)
- method: the method used to solve the puzzle
- blankAtFirst: True if the blank need to be on the first (top), optional, defaults to True
:return: An HTTP request. In the body are all the moves to perform to solve the puzzle
"""
if not request.is_json:
return jsonify(error=True, message="application/json is not used"), 400
try:
data_json = request.get_json(force=True)
method = data_json.get("method")
blank_at_first = bool(data_json.get("blankAtFirst", True))
tiles_tmp = data_json.get("tiles")
try:
if isinstance(tiles_tmp, list):
tiles = tiles_tmp
else:
tiles: TypePuzzle = json.loads(tiles_tmp)
except json.decoder.JSONDecodeError:
            # fall back to a whitespace-separated flat string such as "1 2 3 4 5 6 7 8 0"
            flat_tiles = [int(t) for t in tiles_tmp.split()]
            sqrt_tiles = int(sqrt(len(flat_tiles)))
            tiles: TypePuzzle = [
                flat_tiles[x : x + sqrt_tiles]
                for x in range(0, len(flat_tiles), sqrt_tiles)
            ]
if (not tiles) or (not method) or (method not in get_algorithm.keys()):
return jsonify(error=True, message="Malformed request"), 400
# We solve the puzzle
puzzle: Puzzle = Puzzle(tiles, blank_at_first=blank_at_first)
strategy = get_algorithm.get(method)
strategy = strategy(puzzle)
strategy.solve()
solutions: List[int] = all_diff(strategy.solution)
except Exception as e:
return jsonify(error=True, message="Malformed request", description=e), 400
# Create HTTP Response
response = Response(json.dumps({"solutions": solutions}))
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Access-Control-Allow-Credentials"] = True
response.headers["Access-Control-Allow-Headers"] = "Origin,Content-Type"
response.headers["Access-Control-Allow-Methods"] = "POST, OPTIONS, HEAD"
return response
@app.route("/", methods=["GET"])
def home():
"""Default path, indicates information about the project
:return: An HTTP request containing the basic information of the project
"""
return (
jsonify(
url="https://github.com/av1m/sliding-block-puzzles",
endpoint=[
{"path": "/", "method": "GET"},
{"path": "/solve", "method": "POST"},
],
),
200,
)
|
the-stack_106_17765
|
#! /usr/bin/env python3
"""Checks file name lengths
Copyright (C) 2019-2021 kaoru https://www.tetengo.org/
"""
import os
import subprocess
import sys
from typing import List
import list_sources
max_length: int = 80
def main(args: List[str]) -> None:
"""The main function.
Args:
        args (list[str]): Program arguments
"""
root_path_string = str(list_sources.root())
for path in list_sources.list():
path_string = str(path)[len(root_path_string) :]
path_main_string, extension_string = os.path.splitext(path)
if len(path_string) > max_length:
candidate_path_string: str = _candidate(path_string, extension_string)
            if len(args) > 0 and args[0] == "git_mv":
_git_mv(root_path_string, path_string, candidate_path_string)
else:
_report_too_long(path_string, candidate_path_string)
elif len(path_string) < max_length and path_main_string.endswith("X"):
_report_too_short(path_string)
def _candidate(path_string: str, extension_string: str) -> str:
return (
os.path.splitext(path_string)[0][: max_length - len(extension_string) - 1]
+ "X"
+ extension_string
)
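# Worked example of _candidate() (the path is made up): for an 84-character
# path ending in ".cpp", the stem is cut to 80 - len(".cpp") - 1 = 75
# characters, then "X" and ".cpp" are appended, giving exactly max_length = 80.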
def _git_mv(
root_path_string: str, current_path: str, corrected_path_string: str
) -> None:
subprocess.run(
[
"git",
"mv",
root_path_string + current_path,
root_path_string + corrected_path_string,
],
check=True,
)
def _report_too_long(current_path: str, corrected_path_string: str) -> None:
print(
"Too long path ({} > {}): {}".format(
len(current_path), max_length, current_path
)
)
print(" Candidate: {}".format(corrected_path_string))
def _report_too_short(current_path: str) -> None:
print(
"Too short path ({} < {}): {}".format(
len(current_path), max_length, current_path
)
)
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_106_17767
|
import sys
def minPalPartion(str1):
    """Return the minimum number of cuts needed to partition str1 into palindromes.
    P[i][j] is True when str1[i..j] is a palindrome; C[i] is the minimum
    number of cuts needed for the prefix str1[0..i].
    """
    n = len(str1)
    C = [0] * (n + 1)
    P = [[False for x in range(n + 1)] for y in range(n + 1)]
    # every single character is a palindrome
    for i in range(n):
        P[i][i] = True
    # fill P for substrings of increasing length L
    for L in range(2, n + 1):
        for i in range(n - L + 1):
            j = i + L - 1
            if L == 2:
                P[i][j] = (str1[i] == str1[j])
            else:
                P[i][j] = (str1[i] == str1[j]) and P[i + 1][j - 1]
    # C[i] is 0 if the whole prefix is a palindrome, otherwise the best cut
    for i in range(n):
        if P[0][i]:
            C[i] = 0
        else:
            C[i] = sys.maxsize
            for j in range(i):
                if P[j + 1][i] and 1 + C[j] < C[i]:
                    C[i] = 1 + C[j]
    return C[n - 1]
str1 = "ababbbabbababa";
print("Min cuts needed for Palindrome Partitioning is",minPalPartion(str1));
|
the-stack_106_17768
|
""" Utility functions that simplify defining field of dataclasses.
"""
import argparse
import dataclasses
import enum
import functools
import inspect
import json
import warnings
from collections import OrderedDict
from dataclasses import _MISSING_TYPE, MISSING
from enum import Enum
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from simple_parsing.utils import (
Dataclass,
SimpleValueType,
get_type_arguments,
is_optional,
is_tuple,
is_union,
str2bool,
)
from logging import getLogger
logger = getLogger(__name__)
E = TypeVar("E", bound=Enum)
K = TypeVar("K")
V = TypeVar("V")
T = TypeVar("T")
def field(
default: Union[T, _MISSING_TYPE] = MISSING,
alias: Optional[Union[str, List[str]]] = None,
cmd: bool = True,
positional: bool = False,
*,
to_dict: bool = True,
encoding_fn: Optional[Callable[[T], Any]] = None,
decoding_fn: Optional[Callable[[Any], T]] = None,
# dataclasses.field arguments
default_factory: Union[Callable[[], T], _MISSING_TYPE] = MISSING,
init: bool = True,
repr: bool = True,
hash: Optional[bool] = None,
compare: bool = True,
metadata: Optional[Dict[str, Any]] = None,
**custom_argparse_args: Any,
) -> T:
"""Extension of the `dataclasses.field` function.
Adds the ability to customize how this field's command-line options are
created, as well as how it is serialized / deseralized (if the containing
dataclass inherits from `simple_parsing.Serializable`.
Leftover arguments are fed directly to the
`ArgumentParser.add_argument(*option_strings, **kwargs)` method.
Parameters
----------
default : Union[T, _MISSING_TYPE], optional
The default field value (same as in `dataclasses.field`), by default MISSING
alias : Union[str, List[str]], optional
Additional option_strings to pass to the `add_argument` method, by
default None. When passing strings which do not start by "-" or "--",
will be prefixed with "-" if the string is one character and by "--"
otherwise.
cmd: bool, optional
Whether to add command-line arguments for this field or not. Defaults to
True.
## Serialization-related Keyword Arguments:
to_dict : bool
Whether to include this field in the dictionary when calling `to_dict()`.
Defaults to True.
Only has an effect when the dataclass containing this field is
`Serializable`.
encoding_fn : Callable[[T], Any], optional
Function to apply to this field's value when encoding the dataclass to a
dict. Only has an effect when the dataclass containing this field is
`Serializable`.
decoding_fn : Callable[[Any], T]. optional
Function to use in order to recover a the value of this field from a
serialized entry in a dictionary (inside `cls.from_dict`).
Only has an effect when the dataclass containing this field is
`Serializable`.
## Keyword Arguments of `dataclasses.field`
default_factory : Union[Callable[[], T], _MISSING_TYPE], optional
(same as in `dataclasses.field`), by default None
init : bool, optional
(same as in `dataclasses.field`), by default True
repr : bool, optional
(same as in `dataclasses.field`), by default True
hash : bool, optional
(same as in `dataclasses.field`), by default None
compare : bool, optional
(same as in `dataclasses.field`), by default True
metadata : Dict[str, Any], optional
(same as in `dataclasses.field`), by default None
Returns
-------
T
The value returned by the `dataclasses.field` function.
"""
_metadata: Dict[str, Any] = metadata if metadata is not None else {}
if alias:
_metadata["alias"] = alias if isinstance(alias, list) else [alias]
_metadata.update(dict(to_dict=to_dict))
if encoding_fn is not None:
_metadata.update(dict(encoding_fn=encoding_fn))
if decoding_fn is not None:
_metadata.update(dict(decoding_fn=decoding_fn))
_metadata["cmd"] = cmd
_metadata["positional"] = positional
if custom_argparse_args:
_metadata.update({"custom_args": custom_argparse_args})
action = custom_argparse_args.get("action")
if action == "store_false":
if default not in {MISSING, True}:
raise RuntimeError(
"default should either not be passed or set "
"to True when using the store_false action."
)
default = True # type: ignore
elif action == "store_true":
if default not in {MISSING, False}:
raise RuntimeError(
"default should either not be passed or set "
"to False when using the store_true action."
)
default = False # type: ignore
if default is not MISSING:
return dataclasses.field( # type: ignore
default=default,
init=init,
repr=repr,
hash=hash,
compare=compare,
metadata=_metadata,
)
elif not isinstance(default_factory, dataclasses._MISSING_TYPE):
return dataclasses.field(
default_factory=default_factory,
init=init,
repr=repr,
hash=hash,
compare=compare,
metadata=_metadata,
)
else:
return dataclasses.field(
init=init, repr=repr, hash=hash, compare=compare, metadata=_metadata
)
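# A minimal usage sketch for field() (the dataclass and attribute names below
# are illustrative, not part of this module): an attribute exposed on the CLI
# under an extra "-lr" alias and excluded from Serializable.to_dict().
#
#   @dataclasses.dataclass
#   class Options:
#       learning_rate: float = field(default=1e-3, alias="-lr", to_dict=False)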
@overload
def choice(choices: Type[E], default: E, **kwargs) -> E:
pass
@overload
def choice(choices: Dict[K, V], default: K, **kwargs) -> V:
pass
@overload
def choice(*choices: T, default: T, **kwargs) -> T:
pass
def choice(*choices: T, default: Union[T, _MISSING_TYPE] = MISSING, **kwargs: Any) -> T:
"""Makes a field which can be chosen from the set of choices from the
command-line.
Returns a regular `dataclasses.field()`, but with metadata which indicates
the allowed values.
(New:) If `choices` is a dictionary, then passing the 'key' will result in
the corresponding value being used. The values may be objects, for example.
Similarly for Enum types, passing a type of enum will
Args:
default (T, optional): The default value of the field. Defaults to dataclasses.MISSING,
in which case the command-line argument is required.
Raises:
ValueError: If the default value isn't part of the given choices.
Returns:
T: the result of the usual `dataclasses.field()` function (a dataclass field/attribute).
"""
assert len(choices) > 0, "Choice requires at least one positional argument!"
if len(choices) == 1:
choices = choices[0]
if inspect.isclass(choices) and issubclass(choices, Enum):
# If given an enum, construct a mapping from names to values.
choice_enum: Type[Enum] = choices
choices = OrderedDict((e.name, e) for e in choice_enum)
if default is not MISSING and not isinstance(default, choice_enum):
if default in choices:
warnings.warn(
UserWarning(
f"Setting default={default} could perhaps be ambiguous "
f"(enum names vs enum values). Consider using the enum "
f"value {choices[default]} instead."
)
)
default = choices[default]
else:
raise ValueError(
f"'default' arg should be of type {choice_enum}, but got {default}"
)
if isinstance(choices, dict):
# if the choices is a dict, the options are the keys
# save the info about the choice_dict in the field metadata.
metadata = kwargs.setdefault("metadata", {})
choice_dict = choices
# save the choice_dict in metadata so that we can recover the values in postprocessing.
metadata["choice_dict"] = choice_dict
choices = list(choice_dict.keys())
# TODO: If the choice dict is given, then add encoding/decoding functions that just
# get/set the right key.
def _encoding_fn(value: Any) -> str:
"""Custom encoding function that will simply represent the value as the
the key in the dict rather than the value itself.
"""
if value in choice_dict.keys():
return value
elif value in choice_dict.values():
return [k for k, v in choice_dict.items() if v == value][0]
return value
kwargs.setdefault("encoding_fn", _encoding_fn)
def _decoding_fn(value: Any) -> str:
"""Custom decoding function that will retrieve the value from the
stored key in the dictionary.
"""
return choice_dict.get(value, value)
kwargs.setdefault("decoding_fn", _decoding_fn)
return field(default=default, choices=choices, **kwargs)
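# A minimal usage sketch for choice() (illustrative only): restrict a string
# field to a fixed set of values on the command line.
#
#   @dataclasses.dataclass
#   class HParams:
#       optimizer: str = choice("adam", "sgd", "rmsprop", default="adam")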
def list_field(*default_items: T, **kwargs) -> List[T]:
"""shorthand function for setting a `list` attribute on a dataclass,
so that every instance of the dataclass doesn't share the same list.
Accepts any of the arguments of the `dataclasses.field` function.
Returns:
List[T]: a `dataclasses.field` of type `list`, containing the `default_items`.
"""
default = kwargs.pop("default", None)
if isinstance(default, list):
# can't have that. field wants a default_factory.
# we just give back a copy of the list as a default factory,
# but this should be discouraged.
from copy import deepcopy
kwargs["default_factory"] = lambda: deepcopy(default)
return mutable_field(list, default_items, **kwargs)
def dict_field(
default_items: Union[Dict[K, V], Iterable[Tuple[K, V]]] = None, **kwargs
) -> Dict[K, V]:
"""shorthand function for setting a `dict` attribute on a dataclass,
so that every instance of the dataclass doesn't share the same `dict`.
NOTE: Do not use keyword arguments as you usually would with a dictionary
(as in something like `dict_field(a=1, b=2, c=3)`). Instead pass in a
dictionary instance with the items: `dict_field(dict(a=1, b=2, c=3))`.
The reason for this is that the keyword arguments are interpreted as custom
argparse arguments, rather than arguments of the `dict` function!)
Also accepts any of the arguments of the `dataclasses.field` function.
Returns:
Dict[K, V]: a `dataclasses.Field` of type `Dict[K, V]`, containing the `default_items`.
"""
if default_items is None:
default_items = {}
elif isinstance(default_items, dict):
default_items = default_items.items()
return mutable_field(dict, default_items, **kwargs)
def set_field(*default_items: T, **kwargs) -> Set[T]:
return mutable_field(set, default_items, **kwargs)
def mutable_field(
_type: Type[T],
*args,
init: bool = True,
repr: bool = True,
hash: bool = None,
compare: bool = True,
metadata: Dict[str, Any] = None,
**kwargs,
) -> T:
# TODO: Check whether some of the keyword arguments are destined for the `field` function, or for the partial?
default_factory = kwargs.pop("default_factory", functools.partial(_type, *args))
return field(
default_factory=default_factory,
init=init,
repr=repr,
hash=hash,
compare=compare,
metadata=metadata,
**kwargs,
)
MutableField = mutable_field
def subparsers(
subcommands: Dict[str, Type[Dataclass]], default: Dataclass = MISSING, **kwargs
) -> Any:
return field(
metadata={
"subparsers": subcommands,
},
default=default,
**kwargs,
)
def flag(default: bool, **kwargs):
"""Creates a boolean field with a default value of `default` and nargs='?'."""
action = "store_true" if default is False else "store_false"
return field(default=default, nargs="?", action=action, type=str2bool, **kwargs)
|
the-stack_106_17770
|
import os
from .memory_index import MemoryIndex
from ..utils.serialization import dump_object, load_object
class PersistentIndex(MemoryIndex):
"""An extension of the in-memory index class that commits index
changes to disk."""
def __init__(self, index_path: str) -> None:
super().__init__()
self.index_path = index_path
self.blacklist_path = os.path.join(self.index_path, "blacklist")
self.map_path = os.path.join(self.index_path, "map")
self.load()
def load(self) -> None:
"""Loads the index from disk."""
index_map = load_object(self.map_path)
if index_map is not None:
self.index_map = index_map
blacklist = load_object(self.blacklist_path)
if blacklist is not None:
self.index_blacklist = blacklist
def commit(self) -> None:
"""Persists the index to disk."""
dump_object(self.blacklist_path, self.index_blacklist)
dump_object(self.map_path, self.index_map)
|
the-stack_106_17771
|
import sys
import os
import json
import re
import numpy as np
#import pandas as pd
from Bio import motifs
from Bio import SeqIO
from Bio.Alphabet import IUPAC
#from io import StringIO
from io import StringIO
#import man4ish_guptamfmd.Utils.mfmdUtil as MFU
#import mfmdUtil as MFU
from installed_clients.DataFileUtilClient import DataFileUtil
import shutil
def build_mfmd_command(inputFilePath, motiflen, prb,config):
shutil.copytree('/kb/module/deps/kb_mfmd/mfmd', '/kb/module/work/tmp/mfmd')
#if not os.path.exists('/kb/module/work/tmp/mfmd'):
# os.mkdir('/kb/module/work/tmp/mfmd')
#outputFilePath = '/kb/module/work/tmp/mfmd/mfmd_out/mfmd_out.txt'
print(prb)
print(motiflen)
cwd=config['scratch']
os.chdir("/kb/module/work/tmp")
command = 'java -jar mfmd/mfmd.jar ' + inputFilePath + ' ' + str(motiflen) + ' ' + str(prb)
#command = 'java -jar /kb/module/work/tmp/mfmd/mfmd.jar ' + inputFilePath + ' ' + str(motiflen) + ' ' + str(prb)
#print(command)
return command
def run_mfmd_command(command):
print(command)
os.system('R CMD INSTALL /kb/module/deps/kb_mfmd/Rserve_1.7-3.1.tar.gz')
os.system('/usr/lib/R/bin/Rscript /kb/module/deps/kb_mfmd/mfmd/script.R')
os.system(command)
def parse_mfmd_output(path):
pfmList = []
pfmDict={}
outputFileList = []
pfmMatrix=False
seqflag=False
motifList={}
motifDict={}
locList=[]
alphabet=['A','C','G','T']
motifSet=[]
motifList['Condition']='temp'
motifList['SequenceSet_ref']='123'
background={}
background['A']=0.0
background['C']=0.0
background['G']=0.0
background['T']=0.0
motifDict['Motif_Locations'] = []
motifDict['PWM'] = []
motifDict['PFM'] = []
motiflen=0
a=[]
c=[]
g=[]
t=[]
pwmList=[]
pwmDict={}
rowList = []
rowDict={}
for filename in os.listdir(path):
outputFileList.append(path + '/' + filename)
if(filename=="mfmd_out.txt"):
outputFilePath=path+'/'+filename
mfmdFile = open(outputFilePath,'r')
for line in mfmdFile:
if(re.search("PPM Matrix",line)):
pfmMatrix=True
if(pfmMatrix):
if(line[0].isdigit()):
line=line.strip()
out=line.split()
pfmList.append(out)
a.append(out[0])
c.append(out[1])
g.append(out[2])
t.append(out[3])
rowList = []
rowList.append(('A',float(out[0])))
rowList.append(('C',float(out[1])))
rowList.append(('G',float(out[2])))
rowList.append(('T',float(out[3])))
rowDict['A']=float(out[0])
rowDict['C']=float(out[1])
rowDict['G']=float(out[2])
rowDict['T']=float(out[3])
if(re.search("PSSM Matrix",line)):
pfmMatrix=False
if(re.search("Sequences",line)):
seqflag=True
if(seqflag==True):
line=line.strip()
if(re.search('\*',line)):
seqflag=False
if((line) and not (line.startswith("Seq")) and not (line.startswith("*"))):
line=line.rstrip()
seq=line.split()
seqid=seq[0]
seq_start=int(seq[1])
seq_end=int(seq_start)+int(motiflen)
sequence=seq[2]
orientation='+'
locDict={}
locDict['sequence_id']=seqid;
locDict['start']=seq_start;
locDict['end']=seq_end;
locDict['sequence']=sequence;
locDict['orientation']=orientation;
motifDict['Motif_Locations'].append(locDict)
if(re.search("Width",line)):
arr=line.split(" ")
motiflen=arr[1].split("\t")[0]
a=[float(x) for x in a]
c=[float(x) for x in c]
g=[float(x) for x in g]
t=[float(x) for x in t]
pwmDict['A']=a
pwmDict['C']=c
pwmDict['G']=g
pwmDict['T']=t
pfmDict['A']=[]
pfmDict['C']=[]
pfmDict['G']=[]
pfmDict['T']=[]
motifStr = '>test\n'
motifStr += 'A ' + str(a).replace(',','') + '\n'
motifStr += 'C ' + str(c).replace(',','') + '\n'
motifStr += 'G ' + str(g).replace(',','') + '\n'
motifStr += 'T ' + str(t).replace(',','') + '\n'
handle = StringIO(motifStr)
BioMotif = motifs.read(handle, 'jaspar')
motifDict['PWM']=pwmDict
motifDict['PFM']=pfmDict
motifDict['Iupac_sequence']=str(BioMotif.degenerate_consensus)
motifSet.append(motifDict) #keep in loop for multiple motifs
motifList['Motifs']=motifSet
motifList['Background']=background
motifList['Alphabet']=alphabet
return motifList
def UploadFrommfmd(callback_url, params):
"""
:param params: instance of type "UploadmfmdInParams" -> structure:
parameter "path" of String, parameter "ws_name" of String,
parameter "obj_name" of String
:returns: instance of type "UploadOutput" -> structure: parameter
"obj_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN UploadFrommfmd
print('Extracting motifs')
#motifList = MFU.parse_mfmd_output(params['path'])
motifList = parse_mfmd_output(params['path'])
print(motifList)
MSO = {}
MSO=motifList
'''MSO['Condition'] = 'Temp'
MSO['SequenceSet_ref'] = '123'
MSO['Motifs'] = []
MSO['Alphabet'] = ['A','C','G','T']
#MSO['Background'] = MSU.GetBackground()
#for letter in MSO['Alphabet']:
# MSO['Background'][letter] = 0.0
#MSU.parseMotifList(motifList,MSO)'''
'''params['min_len']=22 #put dummy value for min and max len
params['max_len']=22
#MSU.CheckLength(motifList,params['min_len'],params['max_len'])
#MSU.CheckLength(MSO,params['min_len'],params['max_len'])
for motif in MSO['Motifs']:
print()
for letter in MSO['Alphabet']:
if len(motif['PWM'][letter]) != len(motif['Iupac_sequence']):
print('CAUGHT PWM ERROR HERE')
exit(1)
if 'absolute_locations' in params:
for motif in MSO['Motifs']:
for loc in motif['Motif_Locations']:
if loc['sequence_id'] in params['absolute_locations']:
loc['sequence_id'] = params['contig']
absStart = int(params['start'])
loc['start'] = absStart
loc['end'] = absStart + loc['end']
print("test2")'''
dfu = DataFileUtil(callback_url)
save_objects_params = {}
save_objects_params['id'] = dfu.ws_name_to_id(params['ws_name'])
save_objects_params['objects'] = [{'type': 'KBaseGeneRegulation.MotifSet' , 'data' : MSO , 'name' : params['obj_name']}]
info = dfu.save_objects(save_objects_params)[0]
print('SAVED OBJECT')
print(info)
motif_set_ref = "%s/%s/%s" % (info[6], info[0], info[4])
print(motif_set_ref)
output = {'obj_ref' : motif_set_ref}
print(output)
#exit("test")
#END UploadFrommfmd
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method UploadFrommfmd return value ' +
'output is not type dict as required.')
# return the results
return [output]
|
the-stack_106_17772
|
import os
import random
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from torch import nn, optim
from torch.utils.data import DataLoader, Dataset
from fairtorch import DemographicParityLoss
def seed_everything(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(2020)
class DatasetGenerator:
def __clean_up_data(self, df):
# use_columns = ["gender", "lsat", "pass_bar", "race1", "ugpa", "fulltime"]
use_columns = [
"decile1b",
"decile3",
"decile1",
"cluster",
"lsat",
"ugpa",
"zfygpa",
"DOB_yr",
"grad",
"zgpa",
"fulltime",
"fam_inc",
"age",
"gender",
"parttime",
"male",
"race1",
"Dropout",
"pass_bar",
"tier",
"index6040",
]
df.loc[:, "race1"] = df.loc[:, "race1"].astype(str)
df.loc[:, "race1"] = df.loc[:, "race1"].where(df.loc[:, "race1"] == "white", 0)
df.loc[:, "race1"] = df.loc[:, "race1"].where(df.loc[:, "race1"] != "white", 1)
df.loc[:, "race1"] = df.loc[:, "race1"].astype(int)
categorical_cols = ["grad", "gender", "Dropout"]
df = df.dropna()
for col in use_columns:
if col not in categorical_cols:
df.loc[:, col] = df.loc[:, col].astype(float)
df.loc[:, "gender"] = df.loc[:, "gender"].astype(str)
df = df[use_columns]
df.loc[:, categorical_cols] = df.loc[:, categorical_cols].apply(
LabelEncoder().fit_transform
)
return df.reset_index(drop=True)
def generate_dataset(self, dataset_csv_path=Path("./examples/inputs/bar_pass_prediction.csv")):
df = pd.read_csv(dataset_csv_path)
df = self.__clean_up_data(df)
return df
class BarPassDataset(Dataset):
def __init__(self, x, y, sensitive_feature, transform=None):
self.transform = transform
self.x = x
self.y = y
self.sensitive_feature = sensitive_feature
def __len__(self):
return self.x.shape[0]
def __getitem__(self, idx):
x_i = self.x[idx]
y_i = self.y[idx]
sensitive_feature_i = self.sensitive_feature[idx]
if self.transform:
x_i = self.transform(x_i)
return x_i, y_i, sensitive_feature_i
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.value = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, value, n=1):
self.value = value
self.sum += value * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {value" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
class Trainer:
def __init__(
self,
model,
criterion,
optimizer,
train_dataloader,
valid_dataloader,
fairness_constraint=None,
max_epoch=100,
use_fairness_penalty=True,
metrics="valid_auc",
metrics_direction="max",
early_stopping=True,
early_stopping_patience=10,
):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = model.to(self.device)
self.criterion = criterion.to(self.device)
self.fairness_constraint = fairness_constraint.to(self.device)
self.optimizer = optimizer
self.train_dataloader = train_dataloader
self.valid_dataloader = valid_dataloader
self.max_epoch = max_epoch
self.use_fairness_penalty = use_fairness_penalty
self.metrics = metrics
self.metrics_direction = metrics_direction
self.early_stopping = early_stopping
self.early_stopping_patience = early_stopping_patience
def training_step(self, epoch):
train_loss_epoch = AverageMeter("train_loss", ":.4e")
penalty_epoch = AverageMeter("train_fairness_penalty", ":.4e")
y_list = []
prediction_list = []
self.model.train()
for batch_idx, (x, y, sensitive_feature) in enumerate(self.train_dataloader):
self.optimizer.zero_grad()
logit = self.model(x.to(self.device))
loss = self.criterion(logit.view(-1), y.to(self.device))
if self.fairness_constraint:
penalty = self.fairness_constraint(
x, logit.view(-1), sensitive_feature.to(self.device), y.to(self.device)
)
else:
penalty = 0
if self.use_fairness_penalty:
loss = loss + penalty
prediction = torch.sigmoid(logit)
loss.backward()
self.optimizer.step()
train_loss_epoch.update(loss.item(), x.size(0))
penalty_epoch.update(penalty.item(), x.size(0))
y_list.append(y.detach().cpu())
prediction_list.append(prediction[:, 0])
y = torch.cat(y_list)
prediction = torch.cat(prediction_list)
train_acc_epoch = accuracy_score(
y.detach().cpu().numpy(), (prediction >= 0.5).detach().cpu()
)
train_auc_epoch = roc_auc_score(y.detach().cpu().numpy(), prediction.detach().cpu())
result = {
"epoch": epoch,
"train_loss_epoch": train_loss_epoch.value,
"train_acc_epoch": train_acc_epoch,
"train_auc_epoch": train_auc_epoch,
"train_fairness_penalty": penalty_epoch.value,
}
print(result)
return result
def validation_step(self, epoch):
valid_loss = AverageMeter("validation_loss", ":.4e")
penalty_epoch = AverageMeter("valid_fairness_penalty", ":.4e")
y_list = []
prediction_list = []
self.model.eval()
with torch.no_grad():
for batch_idx, (x, y, sensitive_feature) in enumerate(self.valid_dataloader):
logit = self.model(x.to(self.device))
loss = self.criterion(logit.view(-1), y.to(self.device))
if self.fairness_constraint:
penalty = self.fairness_constraint(
x, logit.view(-1), sensitive_feature.to(self.device), y.to(self.device),
)
else:
penalty = 0
if self.use_fairness_penalty:
loss = loss + penalty
valid_loss.update(loss.item(), x.size(0))
penalty_epoch.update(penalty.item(), x.size(0))
prediction = torch.sigmoid(logit)
y_list.append(y.detach().cpu())
prediction_list.append(prediction[:, 0])
prediction = torch.cat(prediction_list)
y = torch.cat(y_list)
valid_acc = accuracy_score(y.detach().cpu().numpy(), (prediction >= 0.5).detach().cpu())
valid_auc = roc_auc_score(y.detach().cpu().numpy(), prediction.detach().cpu())
result = {
"epoch": epoch,
"valid_loss": valid_loss.value,
"valid_acc": valid_acc,
"valid_auc": valid_auc,
"valid_fairness_penalty": penalty_epoch.value,
}
print(result)
return result
def fit(self):
if self.metrics_direction == "max":
metrics_best = -np.inf
else:
metrics_best = np.inf
train_result_best = {}
valid_result_best = {}
no_improvement = 0
for epoch in range(self.max_epoch):
train_result = self.training_step(epoch)
valid_result = self.validation_step(epoch)
if self.metrics_direction == "max":
if metrics_best < valid_result[self.metrics]:
metrics_best = valid_result[self.metrics]
train_result_best = train_result
valid_result_best = valid_result
else:
no_improvement += 1
else:
if metrics_best > valid_result[self.metrics]:
metrics_best = valid_result[self.metrics]
train_result_best = train_result
valid_result_best = valid_result
else:
no_improvement += 1
if self.early_stopping:
if no_improvement > self.early_stopping_patience:
break
return metrics_best, train_result_best, valid_result_best
def get_dataloader(
df, train_index, val_index, label="pass_bar", sensitive_feature_elements="gender"
):
drop_elements = [label]
x_train = df.drop(drop_elements, axis=1).loc[train_index]
y_train = df.loc[train_index, label]
sensitive_feature_train = df.loc[train_index, sensitive_feature_elements]
x_valid = df.drop(drop_elements, axis=1).loc[val_index]
y_valid = df.loc[val_index, label]
sensitive_feature_valid = df.loc[val_index, sensitive_feature_elements]
x_train = torch.from_numpy(x_train.values).float()
y_train = torch.from_numpy(y_train.values).float()
sensitive_feature_train = torch.from_numpy(sensitive_feature_train.values).float()
train_dataset = BarPassDataset(x=x_train, y=y_train, sensitive_feature=sensitive_feature_train)
train_dataloader = DataLoader(train_dataset, batch_size=128, shuffle=True)
x_valid = torch.from_numpy(x_valid.values).float()
y_valid = torch.from_numpy(y_valid.values).float()
sensitive_feature_valid = torch.from_numpy(sensitive_feature_valid.values).float()
valid_dataset = BarPassDataset(x=x_valid, y=y_valid, sensitive_feature=sensitive_feature_valid)
valid_dataloader = DataLoader(valid_dataset, batch_size=128, shuffle=False)
return train_dataloader, valid_dataloader
if __name__ == "__main__":
label = "pass_bar"
sensitive_feature_elements = "race1"
# sensitive_feature_elements = "gender"
data_generator = DatasetGenerator()
df = data_generator.generate_dataset()
metric_list = []
penalty_list = []
skf = StratifiedKFold(n_splits=5)
for fold, (train_index, val_index) in enumerate(skf.split(df, df["pass_bar"])):
train_dataloader, valid_dataloader = get_dataloader(
df=df,
train_index=train_index,
val_index=val_index,
label=label,
sensitive_feature_elements=sensitive_feature_elements,
)
feature_num = df.drop(label, axis=1).shape[1]
model = nn.Sequential(
nn.Linear(feature_num, 128),
nn.ReLU(),
nn.BatchNorm1d(128),
nn.Linear(128, 64),
nn.ReLU(),
nn.BatchNorm1d(64),
nn.Linear(64, 1),
)
criterion = nn.BCEWithLogitsLoss(pos_weight=None)
fairness_constraint = DemographicParityLoss(
alpha=100,
sensitive_classes=df[sensitive_feature_elements].unique().astype(int).tolist(),
)
optimizer = optim.Adam(model.parameters())
trainer = Trainer(
model=model,
criterion=criterion,
fairness_constraint=fairness_constraint,
optimizer=optimizer,
train_dataloader=train_dataloader,
valid_dataloader=valid_dataloader,
use_fairness_penalty=False,
)
metrics_best, train_result_best, valid_result_best = trainer.fit()
metric_list.append(metrics_best)
penalty_list.append(valid_result_best["valid_fairness_penalty"])
print(f"fold {fold}: metrics_best: {metrics_best}")
print(
f"fold {fold}: valid_fairness_penalty: {valid_result_best['valid_fairness_penalty']}"
)
print(f"metrics fold {metric_list}")
print(f"metrics CV mean {np.mean(metric_list)}")
print(f"penalty_list fold {penalty_list}")
print(f"penalty_list CV mean {np.mean(penalty_list)}")
|
the-stack_106_17773
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Benjamin Vial
# License: MIT
import json
import os
from urllib.request import urlretrieve
def _get_data_path(data_path=None):
"""Return path to data dir.
This directory stores large datasets required for the examples, to avoid
downloading the data several times.
By default the data dir is set to a folder named '.gyptis/data' in the
user home folder.
If the folder does not already exist, it is automatically created.
Parameters
----------
data_path : str | None
The full path to the data dir. ``~/.gyptis/data`` by default.
"""
if data_path is None:
data_path = os.path.join("~", ".gyptis/data")
data_path = os.path.expanduser(data_path)
if not os.path.exists(data_path):
os.makedirs(data_path)
return data_path
def _get_config_path(config_path):
"""Return path to config file
Parameters
----------
config_path : str | None
The path to the data dir. ``~/.gyptis`` by default.
"""
if config_path is None:
config_path = os.path.join("~", ".gyptis")
config_path = os.path.expanduser(config_path)
else:
config_path = os.path.join(config_path, ".gyptis")
return config_path
def _load_config(config_path):
"""Safely load a config file."""
with open(config_path, "r") as fid:
try:
config = json.load(fid)
except ValueError:
# No JSON object could be decoded --> corrupt file?
msg = (
"The config file ({}) is not a valid JSON "
"file and might be corrupted".format(config_path)
)
raise RuntimeError(msg)
config = dict()
return config
def _set_config(config, key, value, config_file):
"""Set the configurations in the config file.
Parameters
----------
key : str
The preference key to set.
value : str | None
The value to assign to the preference key. If None, the key is
deleted.
config_path : str | None
The path to the .gyptis directory.
"""
if not isinstance(key, str):
raise TypeError("key must be of type str, got {} instead".format(type(key)))
    if value is not None and not isinstance(value, str):
        raise TypeError("value must be of type str or None, got {} instead".format(type(value)))
if value is None:
config.pop(key, None)
else:
config[key] = value
# Write all values. This may fail if the default directory is not
# writeable.
config_path = os.path.dirname(config_file)
if not os.path.isdir(config_path):
os.mkdir(config_path)
with open(config_file, "w") as fid:
json.dump(config, fid, sort_keys=True, indent=0)
def download_data(
url, data_file_name, data_key="data_dir", data_path=None, config_path=None
):
"""Downloads a remote dataset and saves path to config file.
Checks if the data file already exists in either the path saved under the
key ``data_key`` in the config file or the default data path;
``~/.gyptis/data``. If the file does not exist, downloads the data
from ``url`` and saves to ``data_file_name`` in data_path. Finally, stores
the location of the data in a config file, under key ``data_key``. Returns
the path to the data file.
Parameters
----------
url : str
Dataset URL.
data_file_name : str
File name to save the dataset at.
    data_key : str
        The configuration key the data path is saved under.
data_path : str | None
The path to the data dir. ``~/.gyptis/data`` by default.
config_path: str | None
The path to the config file. ``~/.gyptis`` by default.
Returns
-------
data_file : str
Full path of the created file.
"""
if not isinstance(url, str):
raise TypeError(
"key must be of type str, got {} instead".format(type(config_key))
)
config_path = _get_config_path(config_path)
config_file = os.path.join(config_path, "gyptis_config.json")
if not os.path.isfile(config_file):
config = {}
else:
config = _load_config(config_file)
data_path = config.get(data_key, None)
if data_path:
data_file = os.path.join(data_path, data_file_name)
else:
data_path = _get_data_path(data_path=data_path)
data_file = os.path.join(data_path, data_file_name)
# Download file if it doesn't exist
if not os.path.exists(data_file):
urlretrieve(url, data_file)
# save download location in config
_set_config(config, data_key, data_path, config_file)
return data_file
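# A minimal usage sketch for download_data() (the URL and file name are
# placeholders, not real assets): the first call downloads the file into
# ~/.gyptis/data and records that location in the config file; later calls
# find the cached copy and skip the download.
#
#   mesh_path = download_data("https://example.org/mesh.msh", "mesh.msh")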
def download_example_data(
data_file_name, example_dir, data_key="data_dir", data_path=None, config_path=None
):
url = (
"https://gitlab.com/gyptis/gyptis/-/raw/master/examples/"
+ example_dir
+ "/"
+ data_file_name
)
return download_data(
url,
data_file_name,
data_key=data_key,
data_path=data_path,
config_path=config_path,
)
|
the-stack_106_17776
|
#!/usr/bin/env python3
"""Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pathlib
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
from scipy import linalg
from imageio import imread  # scipy.misc.imread was removed in SciPy >= 1.2
from torch.nn.functional import adaptive_avg_pool2d
try:
from tqdm import tqdm
except ImportError:
    # If tqdm is not available, provide a mock version of it
def tqdm(x): return x
#from models import lenet
from models.inception import InceptionV3
from models.lenet import LeNet5
def get_activations(files, model, batch_size=50, dims=2048,
cuda=False, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the number
of calculated batches is reported.
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
model.eval()
    is_numpy = isinstance(files[0], np.ndarray)
if len(files) % batch_size != 0:
print(('Warning: number of images is not a multiple of the '
'batch size. Some samples are going to be ignored.'))
if batch_size > len(files):
print(('Warning: batch size is bigger than the data size. '
'Setting batch size to data size'))
batch_size = len(files)
n_batches = len(files) // batch_size
n_used_imgs = n_batches * batch_size
pred_arr = np.empty((n_used_imgs, dims))
for i in tqdm(range(n_batches)):
if verbose:
print('\rPropagating batch %d/%d' % (i + 1, n_batches), end='', flush=True)
start = i * batch_size
end = start + batch_size
if is_numpy:
images = np.copy(files[start:end]) + 1
images /= 2.
else:
images = np.array([imread(str(f)).astype(np.float32)
for f in files[start:end]])
images /= 255.
# Reshape to (n_images, 3, height, width)
images = images.transpose((0, 3, 1, 2))
batch = torch.from_numpy(images).type(torch.FloatTensor)
if cuda:
batch = batch.cuda()
pred = model(batch)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.shape[2] != 1 or pred.shape[3] != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred_arr[start:end] = pred.cpu().data.numpy().reshape(batch_size, -1)
if verbose:
print('done', np.min(images))
return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
    -- mu1   : The sample mean over activations for generated samples (as
               returned by calculate_activation_statistics).
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) +
np.trace(sigma2) - 2 * tr_covmean)
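# Quick numerical sanity check of the closed form above (a sketch, not part of
# the original TTUR/pytorch-fid pipeline): the distance between identical
# Gaussians is ~0, and shifting the mean by a unit vector in every dimension
# adds ||mu_1 - mu_2||^2 = dims to the result.
def _frechet_distance_sanity_check():  # pragma: no cover
    rng = np.random.RandomState(0)
    act = rng.randn(1000, 8)
    mu, sigma = np.mean(act, axis=0), np.cov(act, rowvar=False)
    assert calculate_frechet_distance(mu, sigma, mu, sigma) < 1e-3
    shifted = calculate_frechet_distance(mu + 1.0, sigma, mu, sigma)
    assert abs(shifted - 8.0) < 1e-3  # 8 dimensions, unit shift in each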
def calculate_activation_statistics(act):
"""Calculation of the statistics used by the FID.
    Params:
    -- act : Numpy array of activations with shape (num images, dims), as
             returned by get_activations.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the inception model.
"""
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
def extract_lenet_features(imgs, net):
net.eval()
feats = []
imgs = imgs.reshape([-1, 100] + list(imgs.shape[1:]))
if imgs[0].min() < -0.001:
imgs = (imgs + 1)/2.0
print(imgs.shape, imgs.min(), imgs.max())
imgs = torch.from_numpy(imgs).cuda()
for i, images in enumerate(imgs):
feats.append(net.extract_features(images).detach().cpu().numpy())
feats = np.vstack(feats)
return feats
def _compute_activations(path, model, batch_size, dims, cuda, model_type):
if not type(path) == np.ndarray:
import glob
jpg = os.path.join(path, '*.jpg')
png = os.path.join(path, '*.png')
path = glob.glob(jpg) + glob.glob(png)
if len(path) > 25000:
import random
random.shuffle(path)
path = path[:25000]
if model_type == 'inception':
act = get_activations(path, model, batch_size, dims, cuda)
elif model_type == 'lenet':
act = extract_lenet_features(path, model)
return act
def calculate_fid_given_paths(paths, batch_size, cuda, dims, bootstrap=True, n_bootstraps=10, model_type='inception'):
"""Calculates the FID of two paths"""
pths = []
for p in paths:
if not os.path.exists(p):
raise RuntimeError('Invalid path: %s' % p)
if os.path.isdir(p):
pths.append(p)
elif p.endswith('.npy'):
np_imgs = np.load(p)
            if np_imgs.shape[0] > 25000:
                np_imgs = np_imgs[:25000]
pths.append(np_imgs)
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
if model_type == 'inception':
model = InceptionV3([block_idx])
elif model_type == 'lenet':
model = LeNet5()
model.load_state_dict(torch.load('./models/lenet.pth'))
if cuda:
model.cuda()
act_true = _compute_activations(pths[0], model, batch_size, dims, cuda, model_type)
n_bootstraps = n_bootstraps if bootstrap else 1
pths = pths[1:]
results = []
for j, pth in enumerate(pths):
print(paths[j+1])
actj = _compute_activations(pth, model, batch_size, dims, cuda, model_type)
fid_values = np.zeros((n_bootstraps))
with tqdm(range(n_bootstraps), desc='FID') as bar:
for i in bar:
act1_bs = act_true[np.random.choice(act_true.shape[0], act_true.shape[0], replace=True)]
act2_bs = actj[np.random.choice(actj.shape[0], actj.shape[0], replace=True)]
m1, s1 = calculate_activation_statistics(act1_bs)
m2, s2 = calculate_activation_statistics(act2_bs)
fid_values[i] = calculate_frechet_distance(m1, s1, m2, s2)
bar.set_postfix({'mean': fid_values[:i+1].mean()})
results.append((paths[j+1], fid_values.mean(), fid_values.std()))
return results
if __name__ == '__main__':
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--true', type=str, required=True,
help=('Path to the true images'))
parser.add_argument('--fake', type=str, nargs='+', required=True,
help=('Path to the generated images'))
parser.add_argument('--batch-size', type=int, default=50,
help='Batch size to use')
parser.add_argument('--dims', type=int, default=2048,
choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
help=('Dimensionality of Inception features to use. '
'By default, uses pool3 features'))
parser.add_argument('-c', '--gpu', default='', type=str,
help='GPU to use (leave blank for CPU only)')
parser.add_argument('--model', default='inception', type=str,
help='inception or lenet')
args = parser.parse_args()
print(args)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
paths = [args.true] + args.fake
results = calculate_fid_given_paths(paths, args.batch_size, args.gpu != '', args.dims, model_type=args.model)
for p, m, s in results:
print('FID (%s): %.2f (%.3f)' % (p, m, s))
|
the-stack_106_17777
|
# -*- coding: utf-8 -*-
# stdlib imports
import subprocess
import re
import sys
# third-party imports
import pytest
import toml
HISTKEY = "black/mtimes"
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--black", action="store_true", help="enable format checking with black"
)
def pytest_collect_file(path, parent):
config = parent.config
if config.option.black and path.ext == ".py":
if hasattr(BlackItem, "from_parent"):
return BlackItem.from_parent(parent, fspath=path)
else:
return BlackItem(path, parent)
def pytest_configure(config):
# load cached mtimes at session startup
if config.option.black and hasattr(config, "cache"):
config._blackmtimes = config.cache.get(HISTKEY, {})
config.addinivalue_line("markers", "black: enable format checking with black")
def pytest_unconfigure(config):
# save cached mtimes at end of session
if hasattr(config, "_blackmtimes"):
config.cache.set(HISTKEY, config._blackmtimes)
class BlackItem(pytest.Item, pytest.File):
def __init__(self, fspath, parent):
super(BlackItem, self).__init__(fspath, parent)
self._nodeid += "::BLACK"
self.add_marker("black")
try:
with open("pyproject.toml") as toml_file:
settings = toml.load(toml_file)["tool"]["black"]
if "include" in settings.keys():
settings["include"] = self._re_fix_verbose(settings["include"])
if "exclude" in settings.keys():
settings["exclude"] = self._re_fix_verbose(settings["exclude"])
self.pyproject = settings
except Exception:
self.pyproject = {}
def setup(self):
pytest.importorskip("black")
mtimes = getattr(self.config, "_blackmtimes", {})
self._blackmtime = self.fspath.mtime()
old = mtimes.get(str(self.fspath), 0)
if self._blackmtime == old:
pytest.skip("file(s) previously passed black format checks")
if self._skip_test():
pytest.skip("file(s) excluded by pyproject.toml")
def runtest(self):
cmd = [sys.executable, "-m", "black", "--check", "--diff", "--quiet", str(self.fspath)]
try:
subprocess.run(
cmd, check=True, stdout=subprocess.PIPE, universal_newlines=True
)
except subprocess.CalledProcessError as e:
raise BlackError(e)
mtimes = getattr(self.config, "_blackmtimes", {})
mtimes[str(self.fspath)] = self._blackmtime
def repr_failure(self, excinfo):
if excinfo.errisinstance(BlackError):
return excinfo.value.args[0].stdout
return super(BlackItem, self).repr_failure(excinfo)
def reportinfo(self):
return (self.fspath, -1, "Black format check")
def _skip_test(self):
return self._excluded() or (not self._included())
def _included(self):
if "include" not in self.pyproject:
return True
return re.search(self.pyproject["include"], str(self.fspath))
def _excluded(self):
if "exclude" not in self.pyproject:
return False
return re.search(self.pyproject["exclude"], str(self.fspath))
def _re_fix_verbose(self, regex):
if "\n" in regex:
regex = "(?x)" + regex
return re.compile(regex)
def collect(self):
""" returns a list of children (items and collectors)
for this collection node.
"""
return (self,)
class BlackError(Exception):
pass
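# Sketch of how the include/exclude settings read from pyproject.toml are
# applied (the patterns and paths below are hypothetical examples, not plugin
# defaults): multiline patterns get the verbose "(?x)" prefix, mirroring
# _re_fix_verbose, and a file is skipped when it matches "exclude" or fails to
# match "include".
def _include_exclude_example():  # pragma: no cover
    include = re.compile(r"\.pyi?$")
    exclude = re.compile("(?x)" + "(\n  /build/\n  | /dist/\n)")  # multiline -> verbose mode
    for path in ("repo/src/module.py", "repo/build/generated.py"):
        skip = bool(exclude.search(path)) or not bool(include.search(path))
        print(path, "-> skipped" if skip else "-> checked")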
|
the-stack_106_17778
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Summaries for the example_basic plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorboard.util import tensor_util
from tensorboard.compat.proto import summary_pb2
from tensorboard_plugin_example import metadata
def greeting(name, guest, step=None, description=None):
"""Write a "greeting" summary.
Arguments:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
guest: A rank-0 string `Tensor`.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
with tf.summary.experimental.summary_scope(
name, "greeting_summary", values=[guest, step],
) as (tag, _):
return tf.summary.write(
tag=tag,
tensor=tf.strings.join(["Hello, ", guest, "!"]),
step=step,
metadata=_create_summary_metadata(description),
)
def _create_summary_metadata(description):
return summary_pb2.SummaryMetadata(
summary_description=description,
plugin_data=summary_pb2.SummaryMetadata.PluginData(
plugin_name=metadata.PLUGIN_NAME,
content=b"", # no need for summary-specific metadata
),
)
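# Hedged usage sketch (not part of the plugin): writes a single "greeting"
# summary under a default writer. The log directory is an arbitrary placeholder
# and eager execution (TF2 behavior) is assumed.
def _greeting_example():  # pragma: no cover
    writer = tf.summary.create_file_writer("/tmp/example_basic_demo")  # placeholder logdir
    with writer.as_default():
        greeting("demo", tf.constant("world"), step=0,
                 description="Example greeting summary.")
    writer.flush()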
|
the-stack_106_17779
|
import unittest
import os
import importlib
import glob
import matplotlib
matplotlib.use("Agg")
dirname, filename = os.path.split(os.path.abspath(__file__))
example_dir = dirname.split(os.path.sep)[:-2] + ["examples"]
dirs_to_test = ["07-nsem", "08-vrm", "09-flow", "10-pgi", "20-published"]
class ExampleTest(unittest.TestCase):
pass
def create_runner(script_path):
def test_script(self):
spec = importlib.util.spec_from_file_location("module.name", script_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
try:
mod.run() # some files are defined in a run command
except AttributeError as err:
if "has no attribute 'run'" not in str(err):
raise err
return test_script
# Programmatically add tests to Examples
for dir in dirs_to_test:
script_dir = os.path.sep.join(example_dir + [dir])
os.chdir(script_dir)
scripts = glob.glob(os.path.sep.join([script_dir] + ["*.py"]))
scripts.sort()
for script in scripts:
script_name = "_".join(script.split(os.path.sep)[-2:])
test_method = create_runner(script)
test_method.__name__ = "test_" + script_name
setattr(ExampleTest, test_method.__name__, test_method)
test_method = None # Necessary to stop nosetest from running it at the end
|
the-stack_106_17782
|
#!/usr/bin/env python
import subprocess, time, csv
from multiprocessing import Pool
QUEUE_SIZE = 3
SLEEP_TIME = 1  # in minutes
WAIT_TIME = 4*60  # in minutes
max_trial = WAIT_TIME//SLEEP_TIME
def execute_command(command_tuple):
qsub_command = command_tuple[0]
command_id = command_tuple[1]
tmp_file = 'tmp/comm_'+str(command_id)
trial = 0
    while True:
        exit_status = subprocess.call(qsub_command, shell=True, stdout=open(tmp_file, 'w'))
        if exit_status == 1:  # non-zero exit status means the submission failed
print("Job %s failed to submit" % qsub_command)
return
line = open(tmp_file).readline()
if '.sdb' in line:
l = line.split()
job = l[0]
print('Job started: '+job)
break
else:
trial += 1
time.sleep(SLEEP_TIME*60)
if trial > max_trial:
print("Failed to execute command: "+qsub_command)
return
time.sleep(SLEEP_TIME*60)
    while True:
        check_command = 'qstat -n '+job
        with open(tmp_file, 'w') as f:
            exit_status = subprocess.call(check_command, shell=True, stdout=f, stderr=f)
        if exit_status == 1:  # non-zero exit status means qstat could not query the job
            print("Failed to query status of job %s" % job)
            return
lines = open(tmp_file).readlines()
line = ' '.join(lines)
if 'Job has finished' in line:
print('Job completed: '+job)
break
time.sleep(SLEEP_TIME*60)
subprocess.call('rm '+tmp_file, shell=True)
command_list = []
with open('params_file.csv') as csvfile:
reader = csv.DictReader(csvfile)
for count, param_dict in enumerate(reader):
TRPATHS = param_dict['TRAIN_PATHS']
VPATHS = param_dict['VAL_PATHS']
TEPATHS = param_dict['TEST_PATHS']
CPATH = param_dict['CHP_PATH']
FPATH = param_dict['FW_PATH']
MTHR = param_dict['MAX_THR']
DSTEP = param_dict['DIS_STEP']
NCLASSES = param_dict['NUM_CLASSES']
EDIM = param_dict['EMB_DIM']
NTR_LAYERS = param_dict['NUM_TR_LAYERS']
LFUNC = param_dict['LOSS_FUNC']
KPROB = param_dict['KEEP_PROB']
EX = param_dict['EXP']
LR = param_dict['LEARNING_RATE']
NEPOCHS = param_dict['NUM_EPOCHS']
BSIZE = param_dict['BATCH_SIZE']
OUT='IMGNET_EMB'+str(EDIM)+'_NLAYERS'+str(NTR_LAYERS)+'_LFUNC'+LFUNC+'_KPROB'+str(KPROB)+'_EXP'+str(EX)+'_LR'+str(LR)+'_BS'+str(BSIZE)
qsub_command = 'qsub -v TRAIN_PATHS='+TRPATHS+ \
',VAL_PATHS='+VPATHS+ \
',TEST_PATHS='+TEPATHS+ \
',CHP_PATH='+CPATH+ \
',FW_PATH='+FPATH+ \
',MAX_THR='+str(MTHR)+ \
',DIS_STEP='+str(DSTEP)+ \
',NUM_CLASSES='+str(NCLASSES)+ \
',EMB_DIM='+str(EDIM)+ \
',NUM_TR_LAYERS='+str(NTR_LAYERS)+ \
',LOSS_FUNC='+LFUNC+ \
',KEEP_PROB='+str(KPROB)+ \
',EXP='+str(EX)+ \
',LEARNING_RATE='+str(LR)+ \
',NUM_EPOCHS='+str(NEPOCHS)+ \
',BATCH_SIZE='+str(BSIZE)+ \
',OUTPUT='+OUT+ \
' gpu_exp.pbs'
command_list.append((qsub_command, count))
command_exe_pool = Pool(QUEUE_SIZE)
command_exe_pool.map(execute_command, command_list)
|
the-stack_106_17783
|
"""
Provides helper functions to parse url/query
parameters from aiohttp.
"""
def parse_int(value, allow_non_zero=False):
"""
Parses the given value and returns
it as an integer.
Args:
value (str): The string to be parsed.
allow_non_zero (bool): If False, all values below 1 will
be set to 1.
Returns:
int: The parsed value.
"""
if not value:
return None
try:
parsed_val = int(value)
except ValueError:
return None
if not allow_non_zero:
return 1 if parsed_val < 1 else parsed_val
return parsed_val
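# Small illustration of the clamping behaviour described above (a sketch, not
# part of the aiohttp handlers that call this helper):
def _parse_int_examples():  # pragma: no cover
    assert parse_int("5") == 5
    assert parse_int("0") == 1                       # clamped up to 1 by default
    assert parse_int("-3", allow_non_zero=True) == -3
    assert parse_int("not-a-number") is None
    assert parse_int("") is None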
|
the-stack_106_17785
|
import logging
import os
from abc import ABC, abstractmethod
from typing import Optional
from checkov.terraform.module_loading.content import ModuleContent
from checkov.terraform.module_loading.registry import module_loader_registry
# ModuleContent allows access to a directory containing module file via the `path()`
# function. Instances may be used in a `with` context to ensure temporary directories
# are removed, if applicable.
class ModuleLoader(ABC):
def __init__(self) -> None:
module_loader_registry.register(self)
self.logger = logging.getLogger(__name__)
self.module_source = None
self.current_dir = None
self.dest_dir = None
self.version = 'latest'
self.is_external = True
self.inner_module = ''
    def load(self, current_dir: str, source: str, source_version: Optional[str], dest_dir: str, inner_module: Optional[str] = None) -> ModuleContent:
"""
This function provides an opportunity for the loader to load a module's content if it chooses to do so.
There are three resulting states that can occur when calling this function:
1) the loader can't handle the source type, in which case a ModuleContent is returned for which
the `loaded()` method will return False.
2) the loader can handle the source type and loading is successful, in which case a ModuleContent
object is returned for which `loaded()` returns True and which provides the directory containing
the module files
        3) the loader tried to load the module content but an error occurred, in which case an exception
is raised.
:param current_dir: Directory containing the reference to the module.
:param source: the raw source string from the module's `source` attribute (e.g.,
"hashicorp/consul/aws" or "git::https://example.com/vpc.git?ref=v1.2.0")
:param source_version: contains content from the module's `version` attribute, if provided
        :param dest_dir: where to save the downloaded module
        :param inner_module: optional path of a submodule inside the downloaded module
        :return: A ModuleContent object which may or may not be loaded.
        """
self.module_source = source
self.current_dir = current_dir
self.version = str(source_version)
self.dest_dir = dest_dir
self.inner_module = inner_module
if os.path.exists(self.dest_dir):
return ModuleContent(dir=self.dest_dir)
if not self._is_matching_loader():
return ModuleContent(dir=None)
self.logger.debug(f'getting module {self.module_source} version: {self.version}')
return self._load_module()
@abstractmethod
def _is_matching_loader(self) -> bool:
raise NotImplementedError()
@abstractmethod
def _load_module(self) -> ModuleContent:
raise NotImplementedError()
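# Minimal sketch of a concrete loader built on the interface above; the local
# relative-path handling shown here is illustrative only and is not how
# checkov's real loaders are implemented.
class _ExampleLocalPathLoader(ModuleLoader):
    def _is_matching_loader(self) -> bool:
        # only claim sources that look like local relative paths
        return self.module_source.startswith(("./", "../"))
    def _load_module(self) -> ModuleContent:
        resolved = os.path.normpath(os.path.join(self.current_dir, self.module_source))
        return ModuleContent(dir=resolved if os.path.exists(resolved) else None)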
|
the-stack_106_17786
|
import overpy
import numpy as np
from selfdrive.mapd.lib.geo import R
def create_way(way_id, node_ids, from_way):
"""
Creates and OSM Way with the given `way_id` and list of `node_ids`, copying attributes and tags from `from_way`
"""
return overpy.Way(way_id, node_ids=node_ids, attributes={}, result=from_way._result,
tags=from_way.tags)
class OSM():
def __init__(self):
self.api = overpy.Overpass()
# self.api = overpy.Overpass(url='http://3.65.170.21/api/interpreter')
def fetch_road_ways_around_location(self, lat, lon, radius):
# Calculate the bounding box coordinates for the bbox containing the circle around location.
bbox_angle = np.degrees(radius / R)
# fetch all ways and nodes on this ways in bbox
bbox_str = f'{str(lat - bbox_angle)},{str(lon - bbox_angle)},{str(lat + bbox_angle)},{str(lon + bbox_angle)}'
q = """
way(""" + bbox_str + """)
[highway]
[highway!~"^(footway|path|corridor|bridleway|steps|cycleway|construction|bus_guideway|escape|service|track)$"];
(._;>;);
out;
"""
try:
ways = self.api.query(q).ways
except Exception as e:
print(f'Exception while querying OSM:\n{e}')
ways = []
return ways
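# Hedged usage sketch: the coordinates are arbitrary placeholders, and a live
# Overpass API endpoint (network access) is required for the query to succeed.
def _osm_example():  # pragma: no cover
    osm = OSM()
    ways = osm.fetch_road_ways_around_location(lat=52.52, lon=13.405, radius=500.0)
    print("fetched {} drivable ways within ~500 m".format(len(ways)))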
|
the-stack_106_17788
|
import unittest
from unittest import TestCase
import numpy as np
import os, sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import src.data_processing as data_processing
class TestDataProcessing(TestCase):
def test_reading_file(self):
"""
"""
test_filepath = "../data/test_data/basic_test_data.txt"
text = data_processing.read_txt_file(test_filepath)
self.assertEqual(text, "Hello hello hello there this is a test file, what's it to you? Is it a big deal. Hello? ? 32.123")
clean_text = data_processing.clean_text(text)
self.assertEqual(clean_text, "Hello hello hello there this is a test file, whats it to you? Is it a big deal. Hello? ? 32.123")
word_list = data_processing.convert_text_to_word_list(clean_text)
self.assertEqual(word_list, ['hello', 'hello', 'hello', 'there', 'this', 'is', 'a', 'test', 'file', 'whats', 'it', 'to', 'you', 'is', 'it', 'a', 'big', 'deal', 'hello'])
print(word_list)
frequency_dist = data_processing.convert_word_list_to_frequency_dist(word_list)
        self.assertEqual(frequency_dist, [4, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1])
frequency_counts = data_processing.convert_frequency_dist_to_frequency_counts(frequency_dist)
self.assertEqual(frequency_counts, [9, 3, 0, 1])
frequency_dist_2 = data_processing.convert_frequency_counts_to_frequency_dist(frequency_counts)
self.assertEqual(frequency_dist, frequency_dist_2)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_17790
|
"""
custom-menu setup
"""
import json
from pathlib import Path
from jupyter_packaging import (
create_cmdclass,
install_npm,
ensure_targets,
combine_commands,
skip_if_exists
)
import setuptools
HERE = Path(__file__).parent.resolve()
# The name of the project
name = "custom-menu"
lab_path = (HERE / name / "labextension")
# Representative files that should exist after a successful build
jstargets = [
str(lab_path / "package.json"),
]
package_data_spec = {
name: ["*"],
}
labext_name = "custom-menu"
data_files_spec = [
("share/jupyter/labextensions/%s" % labext_name, str(lab_path), "**"),
("share/jupyter/labextensions/%s" % labext_name, str(HERE), "install.json"),
]
cmdclass = create_cmdclass("jsdeps",
package_data_spec=package_data_spec,
data_files_spec=data_files_spec
)
js_command = combine_commands(
install_npm(HERE, build_cmd="build:prod", npm=["jlpm"]),
ensure_targets(jstargets),
)
is_repo = (HERE / ".git").exists()
if is_repo:
cmdclass["jsdeps"] = js_command
else:
cmdclass["jsdeps"] = skip_if_exists(jstargets, js_command)
long_description = (HERE / "README.md").read_text()
# Get the package info from package.json
pkg_json = json.loads((HERE / "package.json").read_bytes())
setup_args = dict(
name=name,
version=pkg_json["version"],
url=pkg_json["homepage"],
author=pkg_json["author"]["name"],
author_email=pkg_json["author"]["email"],
description=pkg_json["description"],
license=pkg_json["license"],
long_description=long_description,
long_description_content_type="text/markdown",
cmdclass=cmdclass,
packages=setuptools.find_packages(),
install_requires=[
"jupyterlab~=3.0",
],
zip_safe=False,
include_package_data=True,
python_requires=">=3.6",
platforms="Linux, Mac OS X, Windows",
keywords=["Jupyter", "JupyterLab", "JupyterLab3"],
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Framework :: Jupyter",
],
)
if __name__ == "__main__":
setuptools.setup(**setup_args)
|
the-stack_106_17791
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xfindproxy(AutotoolsPackage):
"""xfindproxy is used to locate available X11 proxy services.
It utilizes the Proxy Management Protocol to communicate with a proxy
manager. The proxy manager keeps track of all available proxy
services, starts new proxies when necessary, and makes sure that
proxies are shared whenever possible."""
homepage = "http://cgit.freedesktop.org/xorg/app/xfindproxy"
url = "https://www.x.org/archive/individual/app/xfindproxy-1.0.4.tar.gz"
version('1.0.4', sha256='fa6152fcf9c16fbb2ef52259731df5df899a39a86894b0508456613f26ff924a')
depends_on('libice')
depends_on('libxt')
depends_on('xproto', type='build')
depends_on('xproxymanagementprotocol', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
the-stack_106_17792
|
"""
sentry.tsdb.redis
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from binascii import crc32
from collections import defaultdict
from datetime import timedelta
from hashlib import md5
from django.conf import settings
from django.utils import timezone
from nydus.db import create_cluster
import six
from sentry.tsdb.base import BaseTSDB
class RedisTSDB(BaseTSDB):
"""
A time series storage implementation which maps types + normalized epochs
to hash buckets.
Since each hash keyspace is an epoch, TTLs are applied to the entire bucket.
This ends up looking something like the following inside of Redis:
{
"TSDBModel:epoch:shard": {
"Key": Count
}
}
In our case, this translates to:
{
"Group:epoch:shard": {
"GroupID": Count
}
}
- ``vnodes`` controls the shard distribution and should ideally be set to
the maximum number of physical hosts.
"""
def __init__(self, hosts=None, router=None, prefix='ts:', vnodes=64,
**kwargs):
# inherit default options from REDIS_OPTIONS
defaults = settings.SENTRY_REDIS_OPTIONS
if hosts is None:
hosts = defaults.get('hosts', {0: {}})
if router is None:
router = defaults.get('router', 'nydus.db.routers.keyvalue.PartitionRouter')
self.conn = create_cluster({
'engine': 'nydus.db.backends.redis.Redis',
'router': router,
'hosts': hosts,
})
self.prefix = prefix
self.vnodes = vnodes
super(RedisTSDB, self).__init__(**kwargs)
def make_key(self, model, epoch, model_key):
if isinstance(model_key, six.integer_types):
vnode = model_key % self.vnodes
else:
vnode = crc32(model_key) % self.vnodes
return '{0}{1}:{2}:{3}'.format(self.prefix, model.value, epoch, vnode)
def get_model_key(self, key):
# We specialize integers so that a pure int-map can be optimized by
# Redis, whereas long strings (say tag values) will store in a more
# efficient hashed format.
if not isinstance(key, six.integer_types):
return md5(repr(key)).hexdigest()
return key
def incr(self, model, key, timestamp=None, count=1):
self.incr_multi([(model, key)], timestamp, count)
def incr_multi(self, items, timestamp=None, count=1):
"""
Increment project ID=1 and group ID=5:
>>> incr_multi([(TimeSeriesModel.project, 1), (TimeSeriesModel.group, 5)])
"""
make_key = self.make_key
normalize_to_rollup = self.normalize_to_rollup
if timestamp is None:
timestamp = timezone.now()
with self.conn.map() as conn:
for rollup, max_values in self.rollups:
norm_epoch = normalize_to_rollup(timestamp, rollup)
for model, key in items:
model_key = self.get_model_key(key)
hash_key = make_key(model, norm_epoch, model_key)
conn.hincrby(hash_key, model_key, count)
conn.expire(hash_key, rollup * max_values)
def get_range(self, model, keys, start, end, rollup=None):
"""
To get a range of data for group ID=[1, 2, 3]:
Start and end are both inclusive.
>>> now = timezone.now()
        >>> get_range(TimeSeriesModel.group, [1, 2, 3],
>>> start=now - timedelta(days=1),
>>> end=now)
"""
normalize_to_epoch = self.normalize_to_epoch
normalize_to_rollup = self.normalize_to_rollup
make_key = self.make_key
if rollup is None:
rollup = self.get_optimal_rollup(start, end)
results = []
timestamp = end
with self.conn.map() as conn:
while timestamp >= start:
real_epoch = normalize_to_epoch(timestamp, rollup)
norm_epoch = normalize_to_rollup(timestamp, rollup)
for key in keys:
model_key = self.get_model_key(key)
hash_key = make_key(model, norm_epoch, model_key)
results.append((real_epoch, key, conn.hget(hash_key, model_key)))
timestamp = timestamp - timedelta(seconds=rollup)
results_by_key = defaultdict(dict)
for epoch, key, count in results:
results_by_key[key][epoch] = int(count or 0)
        for key, points in six.iteritems(results_by_key):
results_by_key[key] = sorted(points.items())
return dict(results_by_key)
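# Standalone sketch of the key sharding performed by make_key above (no Redis
# connection needed): integer keys shard by modulo, other keys by crc32, so a
# given (model, epoch, key) always lands in the same hash bucket.
def _make_key_sharding_example():  # pragma: no cover
    prefix, vnodes = 'ts:', 64
    def shard(model_value, epoch, key):
        vnode = key % vnodes if isinstance(key, six.integer_types) else crc32(key) % vnodes
        return '{0}{1}:{2}:{3}'.format(prefix, model_value, epoch, vnode)
    print(shard(1, 1400000000, 12345))        # integer key: 12345 % 64
    print(shard(1, 1400000000, b'some-tag'))  # string-like key: crc32 % 64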
|
the-stack_106_17793
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training and eval worker utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import time
from . import logging_utils
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
class BaseModel(object):
def train_fn(self, x_bhwc):
raise NotImplementedError
def eval_fn(self, x_bhwc):
raise NotImplementedError
def samples_fn(self, x_bhwc):
raise NotImplementedError
@property
def trainable_variables(self):
raise NotImplementedError
@property
def ema(self):
raise NotImplementedError
def _make_ema_model(orig_model, model_constructor):
# Model with EMA parameters
if orig_model.ema is None:
return None
def _to_original_variable_name(name):
# map to the original variable name
parts = name.split('/')
assert parts[0] == 'ema_scope'
return '/'.join(parts[1:])
def _ema_getter(getter, name, *args, **kwargs):
v = getter(_to_original_variable_name(name), *args, **kwargs)
v = orig_model.ema.average(v)
if v is None:
raise RuntimeError('invalid EMA variable name {} -> {}'.format(
name, _to_original_variable_name(name)))
return v
with tf.variable_scope(
tf.get_variable_scope(), custom_getter=_ema_getter, reuse=True):
with tf.name_scope('ema_scope'):
return model_constructor()
def run_eval(
model_constructor,
logdir,
total_bs,
master,
input_fn,
dataset_size):
worker = EvalWorker(
master=master,
model_constructor=model_constructor,
total_bs=total_bs,
input_fn=input_fn)
worker.run(logdir=logdir, once=True)
class EvalWorker(object):
def __init__(self, master, model_constructor, total_bs, input_fn):
self.strategy = tf.distribute.MirroredStrategy()
self.num_cores = self.strategy.num_replicas_in_sync
assert total_bs % self.num_cores == 0
self.total_bs = total_bs
self.local_bs = total_bs // self.num_cores
logging.info('num cores: {}'.format(self.num_cores))
logging.info('total batch size: {}'.format(self.total_bs))
logging.info('local batch size: {}'.format(self.local_bs))
with self.strategy.scope():
# Dataset iterator
dataset = input_fn(params={'batch_size': self.total_bs})
self.eval_iterator = self.strategy.experimental_distribute_dataset(
dataset).make_initializable_iterator()
eval_iterator_next = next(self.eval_iterator)
# Model
self.model = model_constructor()
# Model with EMA parameters
self.ema_model = _make_ema_model(self.model, model_constructor)
# Global step
self.global_step = tf.train.get_global_step()
assert self.global_step is not None, 'global step not created'
# Eval/samples graphs
self.eval_outputs = self._distributed(
self.model.eval_fn, args=(eval_iterator_next,), reduction='mean')
self.samples_outputs = self._distributed(
self.model.samples_fn, args=(eval_iterator_next,), reduction='concat')
# EMA versions of the above
if self.ema_model is not None:
self.ema_eval_outputs = self._distributed(
self.ema_model.eval_fn,
args=(eval_iterator_next,),
reduction='mean')
self.ema_samples_outputs = self._distributed(
self.ema_model.samples_fn,
args=(eval_iterator_next,),
reduction='concat')
def _distributed(self, model_fn, args, reduction):
"""Sharded computation."""
def model_wrapper(inputs_):
return model_fn(inputs_['image'])
out = self.strategy.run(model_wrapper, args=args)
assert isinstance(out, dict)
if reduction == 'mean':
out = {
k: tf.reduce_mean(self.strategy.reduce('mean', v))
for k, v in out.items()
}
assert all(v.shape == [] for v in out.values()) # pylint: disable=g-explicit-bool-comparison
elif reduction == 'concat':
out = {
k: tf.concat(self.strategy.experimental_local_results(v), axis=0)
for k, v in out.items()
}
assert all(v.shape[0] == self.total_bs for v in out.values())
else:
raise NotImplementedError(reduction)
return out
def _make_session(self):
config = tf.ConfigProto()
config.allow_soft_placement = True
logging.info('making session...')
return tf.Session(config=config)
def _run_eval(self, sess, ema):
logging.info('eval pass...')
sess.run(self.eval_iterator.initializer)
all_loss_lists = collections.defaultdict(list)
run_times = []
try:
while True:
# Log progress
if run_times and len(run_times) % 100 == 0:
num_batches_seen = len(list(all_loss_lists.values())[0])
logging.info(
'eval examples_so_far={} time_per_batch={:.5f} {}'.format(
num_batches_seen * self.total_bs,
np.mean(run_times[1:]),
{k: np.mean(l) for k, l in all_loss_lists.items()}))
tstart = time.time()
results = sess.run(self.ema_eval_outputs if ema else self.eval_outputs)
run_times.append(time.time() - tstart)
for k, v in results.items():
all_loss_lists[k].append(v)
except tf.errors.OutOfRangeError:
pass
num_batches_seen = len(list(all_loss_lists.values())[0])
logging.info('eval pass done ({} batches, {} examples)'.format(
num_batches_seen, num_batches_seen * self.total_bs))
results = {k: np.mean(l) for k, l in all_loss_lists.items()}
logging.info('final eval results: {}'.format(results))
return results
def _run_sampling(self, sess, ema):
sess.run(self.eval_iterator.initializer)
logging.info('sampling...')
samples = sess.run(
self.ema_samples_outputs if ema else self.samples_outputs)
logging.info('sampling done')
return samples
def _write_eval_and_samples(self, sess, log, curr_step, prefix, ema):
# Samples
samples_dict = self._run_sampling(sess, ema=ema)
for k, v in samples_dict.items():
assert len(v.shape) == 4 and v.shape[0] == self.total_bs
log.summary_writer.images(
'{}/{}'.format(prefix, k),
np.clip(v, 0, 255).astype('uint8'),
step=curr_step)
log.summary_writer.flush()
# Eval
eval_losses = self._run_eval(sess, ema=ema)
for k, v in eval_losses.items():
log.write(prefix, [{k: v}], step=curr_step)
def run(self, logdir, once, skip_non_ema_pass=True):
"""Runs the eval/sampling worker loop.
Args:
logdir: directory to read checkpoints from
once: if True, writes results to a temporary directory (not to logdir),
and exits after evaluating one checkpoint.
"""
if once:
eval_logdir = os.path.join(logdir, 'eval_once_{}'.format(time.time()))
else:
eval_logdir = logdir
logging.info('Writing eval data to: {}'.format(eval_logdir))
eval_log = logging_utils.Log(eval_logdir, write_graph=False)
with self._make_session() as sess:
# Checkpoint loading
logging.info('making saver')
saver = tf.train.Saver()
for ckpt in tf.train.checkpoints_iterator(logdir):
logging.info('restoring params...')
saver.restore(sess, ckpt)
global_step_val = sess.run(self.global_step)
logging.info('restored global step: {}'.format(global_step_val))
if not skip_non_ema_pass:
logging.info('non-ema pass')
self._write_eval_and_samples(
sess,
log=eval_log,
curr_step=global_step_val,
prefix='eval',
ema=False)
if self.ema_model is not None:
logging.info('ema pass')
self._write_eval_and_samples(
sess,
log=eval_log,
curr_step=global_step_val,
prefix='eval_ema',
ema=True)
if once:
break
|
the-stack_106_17794
|
import numpy as np
import sys
import os
import pandas as pd
import glob
import subprocess
import scanpy as sc
from agutil import parallel
from anndata import AnnData
from typing import Union
from collections.abc import Iterable
from tqdm import tqdm
import pkg_resources
import rpy2
from rpy2.robjects.packages import importr
import canine
"""
Storey Q-Values - https://github.com/StoreyLab/qvalue
--------------------
Python Wrapper
Author: Francois Aguet
https://github.com/broadinstitute/tensorqtl/blob/master/tensorqtl/rfunc.py
"""
def qvalue(p, lambda_qvalue=None):
"""Wrapper for qvalue::qvalue"""
qvalue = importr("qvalue")
rp = rpy2.robjects.vectors.FloatVector(p)
if lambda_qvalue is None:
q = qvalue.qvalue(rp)
else:
if not isinstance(lambda_qvalue, Iterable):
lambda_qvalue = [lambda_qvalue]
rlambda = rpy2.robjects.vectors.FloatVector(lambda_qvalue)
q = qvalue.qvalue(rp, **{'lambda':rlambda})
qval = np.array(q.rx2('qvalues'))
pi0 = np.array(q.rx2('pi0'))[0]
return qval, pi0
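# Hedged usage sketch: requires rpy2 and the R "qvalue" package to be installed;
# the uniform p-values below are synthetic, so pi0 should come out close to 1.
def _qvalue_example():  # pragma: no cover
    p = np.random.uniform(size=1000)
    qval, pi0 = qvalue(p)
    print("estimated pi0 (proportion of nulls):", pi0)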
"""
GLMM Pipeline Functions
------------
* prep_inputs --> prepare inputs from AnnData for lme4 tests
* dispatch --> dispatch jobs on Canine
* compile_de_result --> compiles result from a single diffexp canine output
* compile_all_results --> compiles all outputs
"""
def prep_inputs(
adata: AnnData,
diffexp_dir: str,
groupby: str,
meta_vars: Union[None,list] = None,
genes_to_use: Union[None, list] = None,
n_threads: int = 15
):
"""
Prepare Inputs for Differential Expression
-------------------------------
Args:
* adata: AnnData file
* diffexp_dir: output directory to write out inptus for lme4 to
* groupby: variable to groupy
* meta_vars: list of variables in metadata
* genes_to_use: list of genes to use for differential expression tests
* n_threads: nthreads for saving gene counts
Returns:
None
"""
    if meta_vars is None:
        meta_vars = list(adata.obs)
    else:
        meta_vars = meta_vars.copy()
if genes_to_use is None:
genes_to_use = adata.var_names
for v in meta_vars:
assert v in list(adata.obs), print("{} not in adata.obs.".format(v))
assert groupby in list(adata.obs), "{} not in adata.obs".format(groupby)
if groupby not in meta_vars:
meta_vars.append(groupby)
os.makedirs(diffexp_dir, exist_ok=True)
os.makedirs(os.path.join(diffexp_dir, "inputs"), exist_ok=True)
os.makedirs(os.path.join(diffexp_dir, "inputs", "genes"), exist_ok=True)
# --------------------------
# Create counts dataframe
# --------------------------
counts_df = pd.DataFrame(
adata.layers['counts'].todense(),
index=adata.obs_names,
columns=adata.var_names,
dtype=int
)[genes_to_use]
print(" * {} barcodes".format(counts_df.shape[0]))
print(" * {} genes".format(counts_df.shape[1]))
counts_df.T.to_csv(os.path.join(diffexp_dir, "inputs", "raw_counts.csv"))
counts_df.T.to_parquet(os.path.join(diffexp_dir, "inputs", "raw_counts.parquet"))
print(" * {} covariates".format(meta_vars))
meta_df = adata.obs[meta_vars]
dummy_df = pd.get_dummies(meta_df[groupby])
dummy_df = dummy_df.rename(columns={x:"groupby_"+x for x in dummy_df})
meta_df.join(dummy_df).to_csv(os.path.join(diffexp_dir, "inputs", "meta.csv"))
@parallel.parallelize(maximum=n_threads)
def save_slice(gene_i):
gene = gene_i.replace("/",'_')
try:
counts_df[gene_i].to_csv(os.path.join(diffexp_dir, "inputs", "genes", "{}.csv".format(gene)), header=False)
except:
print("Error with {}".format(gene))
print(" * saving {} genes...".format(counts_df.shape[1]))
_ = [x for x in save_slice(counts_df)]
def dispatch(
meta: str,
genes_dir: str,
dispersions: str,
model: str,
transfer_bucket: str,
n_nodes: int = 1,
worker_type: str = "n1-highmem-4",
verbose:bool = False,
):
"""
    Dispatch lme4 differential expression jobs on Canine.
----------------------
Args:
* meta: path to metadata dataframe (barcodes x covariates) processed by prep.py
* genes_dir: directory of all genes
        * dispersions: path to the gene dispersion estimates file (passed to -d)
        * model: lme4 model formula string (passed to -f)
        * transfer_bucket: GCS bucket used by Canine to stage/transfer files
        * n_nodes: maximum number of worker nodes
        * worker_type: GCE machine type for worker nodes
"""
meta = os.path.abspath(meta)
genes_dir = os.path.abspath(genes_dir)
dispersions = os.path.abspath(dispersions)
genes = [x.split(".csv")[0] for x in os.listdir(genes_dir)]
print("Loaded {} genes.".format(len(genes)))
conf = dict()
conf["name"] = "lme4_diffexp"
conf["backend"] = {
"type": "TransientGCP",
"compute_zone": "us-central1-a",
"controller_type": "n1-highmem-16",
"secondary_disk_size": 100,
"worker_type": worker_type,
"max_node_count": n_nodes
}
conf["localization"] = {
"staging_dir": "/mnt/disks/sec/canine",
"transfer_bucket": transfer_bucket
}
conf["inputs"] = dict()
conf["inputs"]["SCRIPT"] = pkg_resources.resource_filename('mudi', 'diffexp/glmm/lme4_lrt.R')
conf["inputs"]["METADATA"] = meta
conf["inputs"]["DISP"] = dispersions
conf["inputs"]["COUNTS"] = [os.path.join(genes_dir, "{}.csv".format(gene)) for gene in genes]
conf["outputs"] = dict()
conf["outputs"]["output"] = "*.tsv"
conf["resources"] = {
"cpus-per-task": 1,
"mem-per-cpu": 6144
}
conf["script"] = ["set -e -o pipefail"]
conf["script"].extend([
"sudo docker run -v $CANINE_ROOT:$CANINE_ROOT --rm \
--cpus $SLURM_CPUS_PER_TASK \
--memory $(expr $SLURM_CPUS_PER_TASK '*' $SLURM_MEM_PER_CPU)MB \
-t gcr.io/broad-cga-sanand-gtex/r36:latest Rscript --vanilla $SCRIPT -i $COUNTS -m $METADATA -d $DISP -f '{}' -o $CANINE_JOB_ROOT".format(model)
])
if verbose:
print("Running:")
print(conf["script"])
orch = canine.Orchestrator(config=conf)
R = orch.run_pipeline()
def compile_de_result(out_dir: str):
"""
Compile gene-level differential expression result
------------------------
Args:
        * out_dir: individual output directory per gene
"""
summary_tsv = glob.glob(os.path.join(out_dir, "*.summary.tsv"))[0]
lrt_tsv = glob.glob(os.path.join(out_dir, "*.lrt.tsv"))[0]
gene = summary_tsv.split("/")[-1].split(".")[0]
summary_df = pd.read_csv(summary_tsv, sep='\t')
pval_df = summary_df.join(pd.read_csv(lrt_tsv, sep='\t')).dropna().rename(columns={"Pr(>Chisq)":"lrt_p_val"}).drop(columns="celltype")
pval_df = pval_df.rename(index={x:x.split("groupby_")[-1] for x in pval_df.index})
pval_df['gene'] = gene
summary_df['gene'] = gene
summary_df = summary_df.rename(columns={'celltype':'groupby'})
summary_df['groupby'] = summary_df['groupby'].apply(lambda x: x.split("groupby_")[-1])
return pval_df, summary_df
def compile_all_results(output_dirs: list, canine_dir_name: str = "canine_output"):
"""
Compile all results.
------------------------
Args:
* output_dirs: list of directories where canine_outputs are located
"""
pvals = list()
sums = list()
for output_dir in output_dirs:
file_list = glob.glob(os.path.join(output_dir, canine_dir_name, "**/output"))
for x in tqdm(file_list, desc=output_dir):
p,s = compile_de_result(x)
pvals.append(p)
sums.append(s)
# ------------------------
# Q-values for lrt-pvalues
# ------------------------
p_df = pd.concat(pvals)
_p_df = list()
for group in np.unique(p_df.index):
group_df = p_df.loc[group].sort_values('lrt_p_val')
group_df['qval'], pi0 = qvalue(np.array(group_df['lrt_p_val']))
_p_df.append(group_df)
p_df = pd.concat(_p_df)
p_df = p_df.loc[:,['gene','lrt_p_val','qval','coef','stderr','z','p_val','Df','AIC','BIC','logLik','deviance','Chisq','Chi Df']].rename(columns={'coef':'logFC'})
# ------------------------
# Q-values for all covariates
# ------------------------
s_df = pd.concat(sums)
_s_df = list()
vals = s_df.iloc[0].loc[['groupby','gene']]
labs = s_df[(s_df['groupby']==vals[0]) & (s_df['gene']==vals[1])].index
for idx,lab in enumerate(labs):
group_df = s_df.iloc[idx::4,:].sort_values('p_val')
if lab.startswith("groupby"):
group_df['label'] = "groupby"
else:
group_df['label'] = lab
group_df['qval'], pi0 = qvalue(np.array(group_df['p_val']))
_s_df.append(group_df)
s_df = pd.concat(_s_df).set_index('label')
s_df = s_df.loc[:,["gene","groupby","coef","p_val","qval","z","stderr"]].rename(columns={"coef":"logFC"})
return p_df, s_df
|
the-stack_106_17795
|
#!/usr/bin/env python
import grp
import os
import pwd
import sys
from odkim_rotate.key_table import *
from odkim_rotate.manager import *
from odkim_rotate.utils import *
def main(verbose):
manager = Manager(verbose)
manager.opendkim_conf = '/etc/opendkim.conf'
manager.opendkim_keys_basedir = '/etc/dkimkeys'
manager.opendkim_genkey = '/usr/bin/opendkim-genkey'
manager.opendkim_testkey = '/usr/bin/opendkim-testkey'
manager.key_owner = 'opendkim'
manager.key_owner_uid = pwd.getpwnam(manager.key_owner).pw_uid
manager.key_group = 'opendkim'
manager.key_group_gid = grp.getgrnam(manager.key_group).gr_gid
manager.dns_provider = create_dns_provider('linode')
manager.keytable_path = get_keytable_path(manager.opendkim_conf)
manager.keytable = KeyTable(manager.keytable_path)
manager.rotate_keys()
if __name__ == '__main__':
if os.getenv('USER') != 'root':
print('Error: script must be run as root')
sys.exit(os.EX_USAGE)
main(len(sys.argv) == 2 and sys.argv[1] == '-v')
|
the-stack_106_17796
|
from __future__ import annotations
import datetime
from functools import partial
from textwrap import dedent
import warnings
import numpy as np
from pandas._libs.tslibs import Timedelta
import pandas._libs.window.aggregations as window_aggregations
from pandas._typing import (
Axis,
FrameOrSeries,
FrameOrSeriesUnion,
TimedeltaConvertibleTypes,
)
from pandas.compat.numpy import function as nv
from pandas.util._decorators import doc
from pandas.core.dtypes.common import is_datetime64_ns_dtype
from pandas.core.dtypes.missing import isna
import pandas.core.common as common # noqa: PDF018
from pandas.core.util.numba_ import maybe_use_numba
from pandas.core.window.common import zsqrt
from pandas.core.window.doc import (
_shared_docs,
args_compat,
create_section_header,
kwargs_compat,
numba_notes,
template_header,
template_returns,
template_see_also,
window_agg_numba_parameters,
)
from pandas.core.window.indexers import (
BaseIndexer,
ExponentialMovingWindowIndexer,
GroupbyIndexer,
)
from pandas.core.window.numba_ import generate_numba_ewma_func
from pandas.core.window.online import (
EWMMeanState,
generate_online_numba_ewma_func,
)
from pandas.core.window.rolling import (
BaseWindow,
BaseWindowGroupby,
)
def get_center_of_mass(
comass: float | None,
span: float | None,
halflife: float | None,
alpha: float | None,
) -> float:
valid_count = common.count_not_none(comass, span, halflife, alpha)
if valid_count > 1:
raise ValueError("comass, span, halflife, and alpha are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if comass is not None:
if comass < 0:
raise ValueError("comass must satisfy: comass >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
comass = (span - 1) / 2
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
comass = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
comass = (1 - alpha) / alpha
else:
raise ValueError("Must pass one of comass, span, halflife, or alpha")
return float(comass)
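# Worked examples of the conversions above (a sketch, not part of pandas'
# public API): span=9 gives com=(9-1)/2=4, alpha=0.5 gives com=(1-0.5)/0.5=1,
# and halflife=1 implies alpha=0.5 and therefore com=1 as well.
def _center_of_mass_examples():  # pragma: no cover
    assert get_center_of_mass(None, 9, None, None) == 4.0
    assert get_center_of_mass(None, None, None, 0.5) == 1.0
    assert abs(get_center_of_mass(None, None, 1.0, None) - 1.0) < 1e-12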
def _calculate_deltas(
times: str | np.ndarray | FrameOrSeries | None,
halflife: float | TimedeltaConvertibleTypes | None,
) -> np.ndarray:
"""
Return the diff of the times divided by the half-life. These values are used in
the calculation of the ewm mean.
Parameters
----------
times : str, np.ndarray, Series, default None
Times corresponding to the observations. Must be monotonically increasing
and ``datetime64[ns]`` dtype.
halflife : float, str, timedelta, optional
Half-life specifying the decay
Returns
-------
np.ndarray
Diff of the times divided by the half-life
"""
# error: Item "str" of "Union[str, ndarray, FrameOrSeries, None]" has no
# attribute "view"
# error: Item "None" of "Union[str, ndarray, FrameOrSeries, None]" has no
# attribute "view"
_times = np.asarray(
times.view(np.int64), dtype=np.float64 # type: ignore[union-attr]
)
_halflife = float(Timedelta(halflife).value)
return np.diff(_times) / _halflife
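# Small illustration of the helper above (a sketch): with a half-life of
# "2 days", gaps of 2 and 4 days between observations become deltas of 1.0 and
# 2.0 half-lives respectively.
def _calculate_deltas_example():  # pragma: no cover
    import pandas as pd  # local import to avoid a circular import at module load
    times = pd.Series(pd.to_datetime(["2020-01-01", "2020-01-03", "2020-01-07"]))
    deltas = _calculate_deltas(times, halflife="2 days")
    assert np.allclose(deltas, [1.0, 2.0])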
class ExponentialMovingWindow(BaseWindow):
r"""
Provide exponential weighted (EW) functions.
Available EW functions: ``mean()``, ``var()``, ``std()``, ``corr()``, ``cov()``.
Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be
provided.
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`.
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`.
halflife : float, str, timedelta, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for
:math:`halflife > 0`.
If ``times`` is specified, the time unit (str or timedelta) over which an
observation decays to half its value. Only applicable to ``mean()``
and halflife value will not apply to the other functions.
.. versionadded:: 1.1.0
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`.
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
adjust : bool, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average).
- When ``adjust=True`` (default), the EW function is calculated using weights
:math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series
[:math:`x_0, x_1, ..., x_t`] would be:
.. math::
y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 -
\alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t}
- When ``adjust=False``, the exponentially weighted function is calculated
recursively:
.. math::
\begin{split}
y_0 &= x_0\\
y_t &= (1 - \alpha) y_{t-1} + \alpha x_t,
\end{split}
ignore_na : bool, default False
Ignore missing values when calculating weights; specify ``True`` to reproduce
pre-0.15.0 behavior.
- When ``ignore_na=False`` (default), weights are based on absolute positions.
For example, the weights of :math:`x_0` and :math:`x_2` used in calculating
the final weighted average of [:math:`x_0`, None, :math:`x_2`] are
:math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and
:math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``.
- When ``ignore_na=True`` (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of :math:`x_0` and :math:`x_2`
used in calculating the final weighted average of
[:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if
``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``.
axis : {0, 1}, default 0
The axis to use. The value 0 identifies the rows, and 1
identifies the columns.
times : str, np.ndarray, Series, default None
.. versionadded:: 1.1.0
Times corresponding to the observations. Must be monotonically increasing and
``datetime64[ns]`` dtype.
If str, the name of the column in the DataFrame representing the times.
If 1-D array like, a sequence with the same shape as the observations.
Only applicable to ``mean()``.
Returns
-------
DataFrame
A Window sub-classed for the particular operation.
See Also
--------
rolling : Provides rolling window calculations.
expanding : Provides expanding transformations.
Notes
-----
More details can be found at:
:ref:`Exponentially weighted windows <window.exponentially_weighted>`.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
Specifying ``times`` with a timedelta ``halflife`` when computing mean.
>>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']
>>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
B
0 0.000000
1 0.585786
2 1.523889
3 1.523889
4 3.233686
"""
_attributes = [
"com",
"span",
"halflife",
"alpha",
"min_periods",
"adjust",
"ignore_na",
"axis",
"times",
]
def __init__(
self,
obj: FrameOrSeries,
com: float | None = None,
span: float | None = None,
halflife: float | TimedeltaConvertibleTypes | None = None,
alpha: float | None = None,
min_periods: int | None = 0,
adjust: bool = True,
ignore_na: bool = False,
axis: Axis = 0,
times: str | np.ndarray | FrameOrSeries | None = None,
*,
selection=None,
):
super().__init__(
obj=obj,
min_periods=1 if min_periods is None else max(int(min_periods), 1),
on=None,
center=False,
closed=None,
method="single",
axis=axis,
selection=selection,
)
self.com = com
self.span = span
self.halflife = halflife
self.alpha = alpha
self.adjust = adjust
self.ignore_na = ignore_na
self.times = times
if self.times is not None:
if not self.adjust:
raise NotImplementedError("times is not supported with adjust=False.")
if isinstance(self.times, str):
self.times = self._selected_obj[self.times]
if not is_datetime64_ns_dtype(self.times):
raise ValueError("times must be datetime64[ns] dtype.")
# error: Argument 1 to "len" has incompatible type "Union[str, ndarray,
# FrameOrSeries, None]"; expected "Sized"
if len(self.times) != len(obj): # type: ignore[arg-type]
raise ValueError("times must be the same length as the object.")
if not isinstance(self.halflife, (str, datetime.timedelta)):
raise ValueError(
"halflife must be a string or datetime.timedelta object"
)
if isna(self.times).any():
raise ValueError("Cannot convert NaT values to integer")
self._deltas = _calculate_deltas(self.times, self.halflife)
# Halflife is no longer applicable when calculating COM
# But allow COM to still be calculated if the user passes other decay args
if common.count_not_none(self.com, self.span, self.alpha) > 0:
self._com = get_center_of_mass(self.com, self.span, None, self.alpha)
else:
self._com = 1.0
else:
if self.halflife is not None and isinstance(
self.halflife, (str, datetime.timedelta)
):
raise ValueError(
"halflife can only be a timedelta convertible argument if "
"times is not None."
)
# Without times, points are equally spaced
self._deltas = np.ones(max(len(self.obj) - 1, 0), dtype=np.float64)
self._com = get_center_of_mass(
# error: Argument 3 to "get_center_of_mass" has incompatible type
# "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]";
# expected "Optional[float]"
self.com,
self.span,
self.halflife, # type: ignore[arg-type]
self.alpha,
)
def _get_window_indexer(self) -> BaseIndexer:
"""
Return an indexer class that will compute the window start and end bounds
"""
return ExponentialMovingWindowIndexer()
def online(self, engine="numba", engine_kwargs=None):
"""
Return an ``OnlineExponentialMovingWindow`` object to calculate
exponentially moving window aggregations in an online method.
.. versionadded:: 1.3.0
Parameters
----------
engine: str, default ``'numba'``
Execution engine to calculate online aggregations.
Applies to all supported aggregation methods.
engine_kwargs : dict, default None
Applies to all supported aggregation methods.
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
applied to the function
Returns
-------
OnlineExponentialMovingWindow
"""
return OnlineExponentialMovingWindow(
obj=self.obj,
com=self.com,
span=self.span,
halflife=self.halflife,
alpha=self.alpha,
min_periods=self.min_periods,
adjust=self.adjust,
ignore_na=self.ignore_na,
axis=self.axis,
times=self.times,
engine=engine,
engine_kwargs=engine_kwargs,
selection=self._selection,
)
@doc(
_shared_docs["aggregate"],
see_also=dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
"""
),
examples=dedent(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
1 2 5 8
2 3 6 9
>>> df.ewm(alpha=0.5).mean()
A B C
0 1.000000 4.000000 7.000000
1 1.666667 4.666667 7.666667
2 2.428571 5.428571 8.428571
"""
),
klass="Series/Dataframe",
axis="",
)
def aggregate(self, func, *args, **kwargs):
return super().aggregate(func, *args, **kwargs)
agg = aggregate
@doc(
template_header,
create_section_header("Parameters"),
args_compat,
window_agg_numba_parameters,
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also,
create_section_header("Notes"),
numba_notes.replace("\n", "", 1),
window_method="ewm",
aggregation_description="(exponential weighted moment) mean",
agg_method="mean",
)
def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
ewma_func = generate_numba_ewma_func(
engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas
)
return self._apply(
ewma_func,
numba_cache_key=(lambda x: x, "ewma"),
)
elif engine in ("cython", None):
if engine_kwargs is not None:
raise ValueError("cython engine does not accept engine_kwargs")
nv.validate_window_func("mean", args, kwargs)
deltas = None if self.times is None else self._deltas
window_func = partial(
window_aggregations.ewma,
com=self._com,
adjust=self.adjust,
ignore_na=self.ignore_na,
deltas=deltas,
)
return self._apply(window_func)
else:
raise ValueError("engine must be either 'numba' or 'cython'")
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
bias : bool, default False
Use a standard estimation bias correction.
"""
).replace("\n", "", 1),
args_compat,
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) standard deviation",
agg_method="std",
)
def std(self, bias: bool = False, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
return zsqrt(self.var(bias=bias, **kwargs))
def vol(self, bias: bool = False, *args, **kwargs):
warnings.warn(
(
"vol is deprecated will be removed in a future version. "
"Use std instead."
),
FutureWarning,
stacklevel=2,
)
return self.std(bias, *args, **kwargs)
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
bias : bool, default False
Use a standard estimation bias correction.
"""
).replace("\n", "", 1),
args_compat,
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) variance",
agg_method="var",
)
def var(self, bias: bool = False, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
window_func = window_aggregations.ewmcov
wfunc = partial(
window_func,
com=self._com,
adjust=self.adjust,
ignore_na=self.ignore_na,
bias=bias,
)
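        # ewmcov of a series with itself is the exponentially weighted variance,
        # so var() reuses the covariance kernel with both value arguments set to
        # the same array (see var_func below).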
def var_func(values, begin, end, min_periods):
return wfunc(values, begin, end, min_periods, values)
return self._apply(var_func)
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
        other : Series or DataFrame, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
bias : bool, default False
Use a standard estimation bias correction.
"""
).replace("\n", "", 1),
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) sample covariance",
agg_method="cov",
)
def cov(
self,
other: FrameOrSeriesUnion | None = None,
pairwise: bool | None = None,
bias: bool = False,
**kwargs,
):
from pandas import Series
def cov_func(x, y):
x_array = self._prep_values(x)
y_array = self._prep_values(y)
window_indexer = self._get_window_indexer()
min_periods = (
self.min_periods
if self.min_periods is not None
else window_indexer.window_size
)
start, end = window_indexer.get_window_bounds(
num_values=len(x_array),
min_periods=min_periods,
center=self.center,
closed=self.closed,
)
result = window_aggregations.ewmcov(
x_array,
start,
end,
# error: Argument 4 to "ewmcov" has incompatible type
# "Optional[int]"; expected "int"
self.min_periods, # type: ignore[arg-type]
y_array,
self._com,
self.adjust,
self.ignore_na,
bias,
)
return Series(result, index=x.index, name=x.name)
return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
other : Series or DataFrame, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
"""
).replace("\n", "", 1),
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) sample correlation",
agg_method="corr",
)
def corr(
self,
other: FrameOrSeriesUnion | None = None,
pairwise: bool | None = None,
**kwargs,
):
from pandas import Series
def cov_func(x, y):
x_array = self._prep_values(x)
y_array = self._prep_values(y)
window_indexer = self._get_window_indexer()
min_periods = (
self.min_periods
if self.min_periods is not None
else window_indexer.window_size
)
start, end = window_indexer.get_window_bounds(
num_values=len(x_array),
min_periods=min_periods,
center=self.center,
closed=self.closed,
)
def _cov(X, Y):
return window_aggregations.ewmcov(
X,
start,
end,
min_periods,
Y,
self._com,
self.adjust,
self.ignore_na,
True,
)
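            # Correlation is assembled from the covariance kernel:
            # corr(x, y) = cov(x, y) / sqrt(var(x) * var(y)); bias=True is used
            # for all three terms so the normalization factors cancel in the ratio.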
with np.errstate(all="ignore"):
cov = _cov(x_array, y_array)
x_var = _cov(x_array, x_array)
y_var = _cov(y_array, y_array)
result = cov / zsqrt(x_var * y_var)
return Series(result, index=x.index, name=x.name)
return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)
class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow):
"""
Provide an exponential moving window groupby implementation.
"""
_attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes
def __init__(self, obj, *args, _grouper=None, **kwargs):
super().__init__(obj, *args, _grouper=_grouper, **kwargs)
if not obj.empty and self.times is not None:
# sort the times and recalculate the deltas according to the groups
groupby_order = np.concatenate(list(self._grouper.indices.values()))
self._deltas = _calculate_deltas(
self.times.take(groupby_order), # type: ignore[union-attr]
self.halflife,
)
def _get_window_indexer(self) -> GroupbyIndexer:
"""
Return an indexer class that will compute the window start and end bounds
Returns
-------
GroupbyIndexer
"""
window_indexer = GroupbyIndexer(
groupby_indicies=self._grouper.indices,
window_indexer=ExponentialMovingWindowIndexer,
)
return window_indexer
class OnlineExponentialMovingWindow(ExponentialMovingWindow):
def __init__(
self,
obj: FrameOrSeries,
com: float | None = None,
span: float | None = None,
halflife: float | TimedeltaConvertibleTypes | None = None,
alpha: float | None = None,
min_periods: int | None = 0,
adjust: bool = True,
ignore_na: bool = False,
axis: Axis = 0,
times: str | np.ndarray | FrameOrSeries | None = None,
engine: str = "numba",
engine_kwargs: dict[str, bool] | None = None,
*,
selection=None,
):
if times is not None:
raise NotImplementedError(
"times is not implemented with online operations."
)
super().__init__(
obj=obj,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
times=times,
selection=selection,
)
self._mean = EWMMeanState(
self._com, self.adjust, self.ignore_na, self.axis, obj.shape
)
if maybe_use_numba(engine):
self.engine = engine
self.engine_kwargs = engine_kwargs
else:
raise ValueError("'numba' is the only supported engine")
def reset(self):
"""
Reset the state captured by `update` calls.
"""
self._mean.reset()
def aggregate(self, func, *args, **kwargs):
        raise NotImplementedError
def std(self, bias: bool = False, *args, **kwargs):
        raise NotImplementedError
def corr(
self,
other: FrameOrSeriesUnion | None = None,
pairwise: bool | None = None,
**kwargs,
):
        raise NotImplementedError
def cov(
self,
other: FrameOrSeriesUnion | None = None,
pairwise: bool | None = None,
bias: bool = False,
**kwargs,
):
        raise NotImplementedError
def var(self, bias: bool = False, *args, **kwargs):
        raise NotImplementedError
def mean(self, *args, update=None, update_times=None, **kwargs):
"""
Calculate an online exponentially weighted mean.
Parameters
----------
update: DataFrame or Series, default None
New values to continue calculating the
exponentially weighted mean from the last values and weights.
Values should be float64 dtype.
``update`` needs to be ``None`` the first time the
exponentially weighted mean is calculated.
update_times: Series or 1-D np.ndarray, default None
New times to continue calculating the
exponentially weighted mean from the last values and weights.
If ``None``, values are assumed to be evenly spaced
in time.
This feature is currently unsupported.
Returns
-------
DataFrame or Series
Examples
--------
>>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)})
>>> online_ewm = df.head(2).ewm(0.5).online()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
>>> online_ewm.mean(update=df.tail(3))
a b
2 1.615385 6.615385
3 2.550000 7.550000
4 3.520661 8.520661
>>> online_ewm.reset()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
"""
result_kwargs = {}
        is_frame = self._selected_obj.ndim == 2
if update_times is not None:
raise NotImplementedError("update_times is not implemented.")
else:
update_deltas = np.ones(
max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64
)
if update is not None:
if self._mean.last_ewm is None:
raise ValueError(
"Must call mean with update=None first before passing update"
)
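            # Continue the recursion from the stored state: prepend the last EWM
            # value, run the kernel over the combined array, and drop that first
            # row from the returned result (result_from = 1).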
result_from = 1
result_kwargs["index"] = update.index
if is_frame:
last_value = self._mean.last_ewm[np.newaxis, :]
result_kwargs["columns"] = update.columns
else:
last_value = self._mean.last_ewm
result_kwargs["name"] = update.name
np_array = np.concatenate((last_value, update.to_numpy()))
else:
result_from = 0
result_kwargs["index"] = self._selected_obj.index
if is_frame:
result_kwargs["columns"] = self._selected_obj.columns
else:
result_kwargs["name"] = self._selected_obj.name
np_array = self._selected_obj.astype(np.float64).to_numpy()
ewma_func = generate_online_numba_ewma_func(self.engine_kwargs)
result = self._mean.run_ewm(
np_array if is_frame else np_array[:, np.newaxis],
update_deltas,
self.min_periods,
ewma_func,
)
if not is_frame:
result = result.squeeze()
result = result[result_from:]
result = self._selected_obj._constructor(result, **result_kwargs)
return result
|
the-stack_106_17797
|
import os
import threading
import time
import requests
from werkzeug.serving import make_server
from flask import Response
class ServerThread(threading.Thread):
def __init__(self, app):
super().__init__()
@app.route('/ping', methods=['GET'])
def ping():
return Response(status=200)
self.host = os.environ['HAIL_BATCH_WORKER_IP']
self.port = os.environ['HAIL_BATCH_WORKER_PORT']
self.app = app
self.server = make_server('0.0.0.0', 5000, app)
self.context = app.app_context()
self.context.push()
def url_for(self, uri):
return f'http://{self.host}:{self.port}{uri}'
def ping(self):
ping_url = 'http://{}:{}/ping'.format(self.host, self.port)
up = False
while not up:
try:
requests.get(ping_url)
up = True
except requests.exceptions.ConnectionError:
time.sleep(0.01)
def start(self):
super().start()
self.ping()
def run(self):
self.server.serve_forever()
def shutdown(self):
self.server.shutdown()
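# A minimal usage sketch (illustrative only): point the HAIL_* variables at the
# address make_server() binds to above (0.0.0.0:5000), then start and stop the
# server thread.
if __name__ == '__main__':
    from flask import Flask
    os.environ.setdefault('HAIL_BATCH_WORKER_IP', '127.0.0.1')
    os.environ.setdefault('HAIL_BATCH_WORKER_PORT', '5000')
    demo_app = Flask(__name__)
    server = ServerThread(demo_app)
    server.start()  # returns once the /ping route responds
    print(server.url_for('/ping'))
    server.shutdown()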
|
the-stack_106_17800
|
import numpy as np
import mahotas.thin
import pytest
def slow_thin(binimg, n=-1):
"""
This was the old implementation
"""
from mahotas.bbox import bbox
from mahotas._morph import hitmiss
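    # Structuring elements for hit-and-miss thinning; in mahotas' convention
    # 1 = required foreground, 0 = required background, 2 = "don't care".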
_struct_elems = []
_struct_elems.append([
[0,0,0],
[2,1,2],
[1,1,1]])
_struct_elems.append([
[2,0,0],
[1,1,0],
[1,1,2]])
_struct_elems.append([
[1,2,0],
[1,1,0],
[1,2,0]])
_struct_elems.append([
[1,1,2],
[1,1,0],
[2,0,0]])
_struct_elems.append([
[1,1,1],
[2,1,2],
[0,0,0]])
_struct_elems.append([
[2,1,1],
[0,1,1],
[0,0,2]])
_struct_elems.append([
[0,2,1],
[0,1,1],
[0,2,1]])
_struct_elems.append([
[0,0,2],
[0,1,1],
[2,1,1]])
_struct_elems = [np.array(elem, np.uint8) for elem in _struct_elems]
res = np.zeros_like(binimg)
min0,max0,min1,max1 = bbox(binimg)
r,c = (max0-min0,max1-min1)
image_exp = np.zeros((r+2, c+2), np.uint8)
imagebuf = np.zeros((r+2,c+2), np.uint8)
prev = np.zeros((r+2,c+2), np.uint8)
image_exp[1:r+1, 1:c+1] = binimg[min0:max0,min1:max1]
n_iter = 0
while True:
prev[:] = image_exp[:]
for elem in _struct_elems:
newimg = hitmiss(image_exp, elem, imagebuf)
image_exp -= newimg
if np.all(prev == image_exp):
break
n_iter += 1
if (n > 0) and (n_iter == n):
break
res[min0:max0,min1:max1] = image_exp[1:r+1, 1:c+1]
return res
def test_thin():
A = np.zeros((100,100), bool)
A[20:40] = 1
W = mahotas.thin(A)
assert mahotas.erode(W).sum() == 0
assert (W & A).sum() == W.sum()
def gen_compares():
A = np.zeros((100,100), bool)
yield A.copy()
A[20:40] = 1
yield A.copy()
A[:,20:40] = 1
yield A.copy()
A[60:80,60:80] = 1
yield A.copy()
@pytest.mark.parametrize('A', gen_compares())
def test_compare(A):
W = mahotas.thin(A)
W2 = slow_thin(A)
assert np.all(W == W2)
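# Illustrative sketch (not part of the test suite): thinning a filled block
# reduces it to a thin skeleton that stays inside the original region.
if __name__ == '__main__':
    demo = np.zeros((50, 50), bool)
    demo[10:40, 10:40] = True
    skeleton = mahotas.thin(demo)
    print(f'{skeleton.sum()} foreground pixels remain after thinning')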
|
the-stack_106_17801
|
from django.http import Http404
from rest_framework import status
from rest_framework import serializers
from rest_framework.response import Response
from rest_framework.generics import ListAPIView
class RelatedModelMixin:
related_field = None
related_model_class = None
related_model_serializer_class = None
payload_serializer_class = None
def get_object(self):
pk = self.kwargs.get('pk')
return super().get_queryset().get(pk=pk)
def get_related_field(self):
if self.related_field is not None:
return self.related_field
raise NotImplementedError(
f'''[RelatedModelMixin][{self.__class__.__name__}] either include a class property
`related_field` or implement class method `get_related_field`'''
)
def get_payload_serializer_class(self):
if self.payload_serializer_class is not None:
return self.payload_serializer_class
raise NotImplementedError(
f'''[RelatedModelMixin][{self.__class__.__name__}] either include a class property
`payload_serializer_class` or implement class method `get_payload_serializer_class`'''
)
def get_serializer_context(self):
return dict(view=self.__class__)
def get_payload_serializer(self):
SerializerClass = self.get_payload_serializer_class()
return SerializerClass(data=self.request.data, context=self.get_serializer_context())
def get_related_model_class(self):
if self.related_model_class is not None:
return self.related_model_class
raise NotImplementedError(
f'''[RelatedModelMixin][{self.__class__.__name__}] either include a class property
`related_model_class` or implement class method `get_related_model_class`'''
)
def process_related_model_instance(self, instance, *args, **kwargs):
raise NotImplementedError(
f'[RelatedModelMixin][{self.__class__.__name__}] `process_related_model_instance` not implemented'
)
def prepare_response(self, *args, **kwargs):
raise NotImplementedError(
f'[RelatedModelMixin][{self.__class__.__name__}] `prepare_response` not implemented'
)
def process_related_model(self, *args, **kwargs):
payload_serializer = self.get_payload_serializer()
if not payload_serializer.is_valid():
raise ValueError(payload_serializer.errors)
RelatedModel = self.get_related_model_class()
related_instances = RelatedModel.objects.filter(
id__in=payload_serializer.data.get(self.get_related_field())
)
for related_instance in related_instances:
self.process_related_model_instance(related_instance, *args, **kwargs)
return self.prepare_response(*args, **kwargs)
class AddRelatedEntityMixin(RelatedModelMixin):
def patch(self, request, pk):
return self.process_related_model()
class RemoveRelatedEntityMixin(RelatedModelMixin):
def delete(self, request, pk):
return self.process_related_model()
class RelatedEntityAPIView(AddRelatedEntityMixin, RemoveRelatedEntityMixin, ListAPIView):
serializer_class = None
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.processed = []
def get_payload_serializer_class(self):
return self.PayloadSerializer
def get_queryset(self):
queryset = super(ListAPIView, self).get_queryset()
if self.request.method == 'GET':
try:
return self.get_related_queryset()
except queryset.model.DoesNotExist:
raise Http404
return queryset
def get_related_queryset(self):
view_object = self.get_object()
return getattr(view_object, self.get_related_field())
def get_serializer_class(self):
if self.request.method == 'GET':
return super(ListAPIView, self).get_serializer_class()
else:
return self.get_payload_serializer_class()
def process_related_model_instance(self, instance):
related_queryset = self.get_related_queryset()
object_exists = related_queryset.filter(pk=instance.id).exists()
if object_exists and self.request.method == 'DELETE':
related_queryset.remove(instance)
elif not object_exists and self.request.method == 'PATCH':
related_queryset.add(instance)
self.processed.append(instance.id)
def prepare_response(self):
PayloadSerializer = self.get_serializer_class()
payload = {self.get_related_field(): self.processed}
serializer_context = self.get_serializer_context()
payload_serializer = PayloadSerializer(data=payload, context=serializer_context)
if payload_serializer.is_valid():
return Response(payload_serializer.data, status=status.HTTP_200_OK)
return Response(payload_serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class PayloadSerializer(serializers.Serializer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
View = self.context.get('view')
payload_field = getattr(View, 'related_field')
self.fields[payload_field] = serializers.ListField(child=serializers.IntegerField())
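# Hedged usage sketch: how a concrete view might wire this mixin together. The
# `Author`/`Book` models, `BookSerializer`, and the route shown are illustrative
# placeholders (not part of this module), so the snippet is left commented out.
#
# class AuthorBooksAPIView(RelatedEntityAPIView):
#     queryset = Author.objects.all()
#     serializer_class = BookSerializer   # used for GET listing
#     related_field = 'books'             # M2M field name on Author
#     related_model_class = Book
#
# With a route such as /authors/<pk>/books/, PATCH with {"books": [1, 2]} adds
# those books, DELETE with the same payload removes them, and GET lists them.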
|
the-stack_106_17802
|
"""
Module containing logic related to eager DataFrames
"""
import os
import typing as tp
import warnings
from io import BytesIO, StringIO
from pathlib import Path
from typing import (
Any,
BinaryIO,
Callable,
Dict,
Iterable,
Iterator,
Optional,
Sequence,
TextIO,
Tuple,
Type,
Union,
)
import numpy as np
import pyarrow as pa
import pyarrow.compute
import pyarrow.parquet
import polars as pl
from polars.internals.construction import (
arrow_to_pydf,
dict_to_pydf,
numpy_to_pydf,
pandas_to_pydf,
sequence_to_pydf,
series_to_pydf,
)
try:
from polars.polars import PyDataFrame, PySeries
_DOCUMENTING = False
except ImportError:
_DOCUMENTING = True
from .._html import NotebookFormatter
from ..datatypes import DTYPES, Boolean, DataType, UInt32, pytype_to_polars_type
from ..utils import _process_null_values
try:
import pandas as pd
_PANDAS_AVAILABLE = True
except ImportError:
_PANDAS_AVAILABLE = False
__all__ = [
"DataFrame",
]
def wrap_df(df: "PyDataFrame") -> "DataFrame":
return DataFrame._from_pydf(df)
def _prepare_other_arg(other: Any) -> "pl.Series":
# if not a series create singleton series such that it will broadcast
if not isinstance(other, pl.Series):
if isinstance(other, str):
pass
elif isinstance(other, Sequence):
raise ValueError("Operation not supported.")
other = pl.Series("", [other])
return other
class DataFrame:
"""
A DataFrame is a two-dimensional data structure that represents data as a table
with rows and columns.
Parameters
----------
data : dict, Sequence, ndarray, Series, or pandas.DataFrame
Two-dimensional data in various forms. dict must contain Sequences.
Sequence may contain Series or other Sequences.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. If specified, overrides any
labels already present in the data. Must match data dimensions.
orient : {'col', 'row'}, default None
Whether to interpret two-dimensional data as columns or as rows. If None,
        the orientation is inferred by matching the columns and data dimensions. If
this does not yield conclusive results, column orientation is used.
nullable : bool, default True
If your data does not contain null values, set to False to speed up
DataFrame creation.
Examples
--------
Constructing a DataFrame from a dictionary:
>>> data = {'a': [1, 2], 'b': [3, 4]}
>>> df = pl.DataFrame(data)
>>> df
shape: (2, 2)
╭─────┬─────╮
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 4 │
╰─────┴─────╯
Notice that the dtype is automatically inferred as a polars Int64:
>>> df.dtypes
[<class 'polars.datatypes.Int64'>, <class 'polars.datatypes.Int64'>]
In order to specify dtypes for your columns, initialize the DataFrame with a list
of Series instead:
>>> data = [pl.Series('col1', [1, 2], dtype=pl.Float32),
... pl.Series('col2', [3, 4], dtype=pl.Int64)]
    >>> df2 = pl.DataFrame(data)
>>> df2
shape: (2, 2)
╭──────┬──────╮
│ col1 ┆ col2 │
│ --- ┆ --- │
│ f32 ┆ i64 │
╞══════╪══════╡
│ 1 ┆ 3 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ 4 │
╰──────┴──────╯
Constructing a DataFrame from a numpy ndarray, specifying column names:
>>> data = np.array([(1, 2), (3, 4)])
>>> df3 = pl.DataFrame(data, columns=['a', 'b'], orient='col')
>>> df3
shape: (2, 2)
╭─────┬─────╮
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 4 │
╰─────┴─────╯
Constructing a DataFrame from a list of lists, row orientation inferred:
>>> data = [[1, 2, 3], [4, 5, 6]]
>>> df4 = pl.DataFrame(data, columns=['a', 'b', 'c'])
>>> df4
shape: (2, 3)
╭─────┬─────┬─────╮
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 5 ┆ 6 │
╰─────┴─────┴─────╯
"""
def __init__(
self,
data: Optional[
Union[
Dict[str, Sequence[Any]],
Sequence[Any],
np.ndarray,
"pd.DataFrame",
"pl.Series",
]
] = None,
columns: Optional[Sequence[str]] = None,
orient: Optional[str] = None,
nullable: bool = True,
):
# Handle positional arguments for old constructor
if isinstance(columns, bool):
warnings.warn(
"Specifying nullable as a positional argument is deprecated. "
"Use a keyword argument to silence this warning.",
DeprecationWarning,
stacklevel=2,
)
nullable = columns
columns = None
if data is None:
self._df = dict_to_pydf({}, columns=columns, nullable=nullable)
elif isinstance(data, dict):
self._df = dict_to_pydf(data, columns=columns, nullable=nullable)
elif isinstance(data, np.ndarray):
self._df = numpy_to_pydf(
data, columns=columns, orient=orient, nullable=nullable
)
elif isinstance(data, pa.Table):
self._df = arrow_to_pydf(data, columns=columns)
elif isinstance(data, Sequence) and not isinstance(data, str):
self._df = sequence_to_pydf(
data, columns=columns, orient=orient, nullable=nullable
)
elif isinstance(data, pl.Series):
self._df = series_to_pydf(data, columns=columns)
elif _PANDAS_AVAILABLE and isinstance(data, pd.DataFrame):
self._df = pandas_to_pydf(data, columns=columns)
else:
raise ValueError("DataFrame constructor not called properly.")
@classmethod
def _from_pydf(cls, py_df: "PyDataFrame") -> "DataFrame":
"""
Construct Polars DataFrame from FFI PyDataFrame object.
"""
df = cls.__new__(cls)
df._df = py_df
return df
@classmethod
def _from_dict(
cls,
data: Dict[str, Sequence[Any]],
columns: Optional[Sequence[str]] = None,
nullable: bool = True,
) -> "DataFrame":
"""
Construct a DataFrame from a dictionary of sequences.
Parameters
----------
data : dict of sequences
Two-dimensional data represented as a dictionary. dict must contain
Sequences.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. If specified, overrides any
labels already present in the data. Must match data dimensions.
nullable : bool, default True
If your data does not contain null values, set to False to speed up
DataFrame creation.
Returns
-------
DataFrame
"""
return cls._from_pydf(dict_to_pydf(data, columns=columns, nullable=nullable))
@classmethod
def _from_records(
cls,
data: Union[np.ndarray, Sequence[Sequence[Any]]],
columns: Optional[Sequence[str]] = None,
orient: Optional[str] = None,
nullable: bool = True,
) -> "DataFrame":
"""
Construct a DataFrame from a numpy ndarray or sequence of sequences.
Parameters
----------
data : numpy ndarray or Sequence of sequences
Two-dimensional data represented as numpy ndarray or sequence of sequences.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. Must match data dimensions.
If not specified, columns will be named `column_0`, `column_1`, etc.
orient : {'col', 'row'}, default None
Whether to interpret two-dimensional data as columns or as rows. If None,
            the orientation is inferred by matching the columns and data dimensions. If
this does not yield conclusive results, column orientation is used.
nullable : bool, default True
If your data does not contain null values, set to False to speed up
DataFrame creation.
Returns
-------
DataFrame
"""
if isinstance(data, np.ndarray):
pydf = numpy_to_pydf(
data, columns=columns, orient=orient, nullable=nullable
)
else:
pydf = sequence_to_pydf(
data, columns=columns, orient=orient, nullable=nullable
)
return cls._from_pydf(pydf)
@classmethod
def _from_arrow(
cls,
data: pa.Table,
columns: Optional[Sequence[str]] = None,
rechunk: bool = True,
) -> "DataFrame":
"""
Construct a DataFrame from an Arrow table.
This operation will be zero copy for the most part. Types that are not
supported by Polars may be cast to the closest supported type.
Parameters
----------
data : numpy ndarray or Sequence of sequences
Two-dimensional data represented as Arrow table.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. Must match data dimensions.
            If not specified, existing Arrow table columns are used, and any missing
            names are filled in as `column_0`, `column_1`, etc.
rechunk : bool, default True
Make sure that all data is contiguous.
Returns
-------
DataFrame
"""
return cls._from_pydf(arrow_to_pydf(data, columns=columns, rechunk=rechunk))
@classmethod
def _from_pandas(
cls,
data: "pd.DataFrame",
columns: Optional[Sequence[str]] = None,
rechunk: bool = True,
) -> "DataFrame":
"""
Construct a Polars DataFrame from a pandas DataFrame.
Parameters
----------
data : pandas DataFrame
Two-dimensional data represented as a pandas DataFrame.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. If specified, overrides any
labels already present in the data. Must match data dimensions.
rechunk : bool, default True
Make sure that all data is contiguous.
Returns
-------
DataFrame
"""
return cls._from_pydf(pandas_to_pydf(data, columns=columns, rechunk=rechunk))
@classmethod
def from_arrow(cls, table: pa.Table, rechunk: bool = True) -> "DataFrame":
"""
.. deprecated:: 0.8.13
`DataFrame.from_arrow` will be removed in Polars 0.9.0. Use `pl.from_arrow`
instead, or call the DataFrame constructor directly.
Construct a DataFrame from an arrow Table.
        Most of the data will be zero copy. Types that are not supported by Polars
        may be cast to the closest supported type.
Parameters
----------
table
Arrow Table.
rechunk
Make sure that all data is contiguous.
"""
warnings.warn(
"DataFrame.from_arrow is deprecated, Use `pl.from_arrow` instead, "
"or call the DataFrame constructor directly.",
DeprecationWarning,
stacklevel=2,
)
return cls._from_arrow(table, rechunk=rechunk)
@classmethod
def from_rows(
cls,
rows: Sequence[Sequence[Any]],
column_names: Optional[Sequence[str]] = None,
column_name_mapping: Optional[Dict[int, str]] = None,
) -> "DataFrame":
"""
.. deprecated:: 0.8.13
`from_rows` will be removed in Polars 0.9.0, it is replaced by
`from_records` because the latter offers more versatility. To keep the same
functionality, call `from_records` with `orient='row'`
Create a DataFrame from rows. This should only be used as a last resort,
as this is more expensive than creating from columnar data.
Parameters
----------
rows
rows.
column_names
column names to use for the DataFrame.
column_name_mapping
map column index to a new name:
Example:
        >>> column_mapping = {0: "first_column", 3: "fourth column"}
"""
warnings.warn(
"from_rows is deprecated, use from_records with orient='row'.",
DeprecationWarning,
stacklevel=2,
)
df = DataFrame.__new__(DataFrame)
df._df = PyDataFrame.read_rows(rows)
if column_names is not None:
df.columns = list(column_names)
if column_name_mapping is not None:
for i, name in column_name_mapping.items():
s = df[:, i]
s.rename(name, in_place=True)
df.replace_at_idx(i, s)
return df
@staticmethod
def read_csv(
file: Union[str, BinaryIO, bytes],
infer_schema_length: int = 100,
batch_size: int = 64,
has_headers: bool = True,
ignore_errors: bool = False,
stop_after_n_rows: Optional[int] = None,
skip_rows: int = 0,
projection: Optional[tp.List[int]] = None,
sep: str = ",",
columns: Optional[tp.List[str]] = None,
rechunk: bool = True,
encoding: str = "utf8",
n_threads: Optional[int] = None,
dtype: Union[Dict[str, Type[DataType]], tp.List[Type[DataType]], None] = None,
low_memory: bool = False,
comment_char: Optional[str] = None,
null_values: Optional[Union[str, tp.List[str], Dict[str, str]]] = None,
) -> "DataFrame":
"""
Read a CSV file into a Dataframe.
Parameters
----------
file
Path to a file or a file like object. Any valid filepath can be used. Example: `file.csv`.
infer_schema_length
Maximum number of lines to read to infer schema.
batch_size
Number of lines to read into the buffer at once. Modify this to change performance.
has_headers
Indicate if first row of dataset is header or not. If set to False first row will be set to `column_x`,
`x` being an enumeration over every column in the dataset.
ignore_errors
Try to keep reading lines if some lines yield errors.
stop_after_n_rows
After n rows are read from the CSV, it stops reading.
During multi-threaded parsing, an upper bound of `n` rows
cannot be guaranteed.
skip_rows
Start reading after `skip_rows`.
projection
Indexes of columns to select. Note that column indexes count from zero.
sep
Character to use as delimiter in the file.
columns
            Columns to project/select.
rechunk
Make sure that all columns are contiguous in memory by aggregating the chunks into a single array.
encoding
Allowed encodings: `utf8`, `utf8-lossy`. Lossy means that invalid utf8 values are replaced with `�` character.
n_threads
Number of threads to use in csv parsing. Defaults to the number of physical cpu's of your system.
dtype
Overwrite the dtypes during inference.
low_memory
            Reduce memory usage at the expense of performance.
comment_char
character that indicates the start of a comment line, for instance '#'.
null_values
Values to interpret as null values. You can provide a:
- str -> all values encountered equal to this string will be null
- tp.List[str] -> A null value per column.
- Dict[str, str] -> A dictionary that maps column name to a null value string.
Returns
-------
DataFrame
Examples
--------
>>> df = pl.read_csv('file.csv', sep=';', stop_after_n_rows=25)
"""
self = DataFrame.__new__(DataFrame)
path: Optional[str]
if isinstance(file, str):
path = file
else:
path = None
if isinstance(file, BytesIO):
file = file.getvalue()
if isinstance(file, StringIO):
file = file.getvalue().encode()
dtype_list: Optional[tp.List[Tuple[str, Type[DataType]]]] = None
dtype_slice: Optional[tp.List[Type[DataType]]] = None
if dtype is not None:
if isinstance(dtype, dict):
dtype_list = []
for k, v in dtype.items():
dtype_list.append((k, pytype_to_polars_type(v)))
elif isinstance(dtype, list):
dtype_slice = dtype
else:
raise ValueError("dtype arg should be list or dict")
processed_null_values = _process_null_values(null_values)
self._df = PyDataFrame.read_csv(
file,
infer_schema_length,
batch_size,
has_headers,
ignore_errors,
stop_after_n_rows,
skip_rows,
projection,
sep,
rechunk,
columns,
encoding,
n_threads,
path,
dtype_list,
dtype_slice,
low_memory,
comment_char,
processed_null_values,
)
return self
@staticmethod
def read_parquet(
file: Union[str, BinaryIO],
stop_after_n_rows: Optional[int] = None,
) -> "DataFrame":
"""
Read into a DataFrame from a parquet file.
Parameters
----------
file
Path to a file or a file like object. Any valid filepath can be used.
stop_after_n_rows
            Only read the specified number of rows of the dataset. After `n` rows, reading stops.
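        Examples
        --------
        An illustrative call ('file.parquet' is a placeholder path):
        >>> df = pl.read_parquet('file.parquet')  # doctest: +SKIP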
"""
self = DataFrame.__new__(DataFrame)
self._df = PyDataFrame.read_parquet(file, stop_after_n_rows)
return self
@staticmethod
def read_ipc(file: Union[str, BinaryIO]) -> "DataFrame":
"""
Read into a DataFrame from Arrow IPC stream format. This is also called the feather format.
Parameters
----------
file
Path to a file or a file like object.
Returns
-------
DataFrame
"""
self = DataFrame.__new__(DataFrame)
self._df = PyDataFrame.read_ipc(file)
return self
@staticmethod
def read_json(file: Union[str, BytesIO]) -> "DataFrame":
"""
Read into a DataFrame from JSON format.
Parameters
----------
file
Path to a file or a file like object.
"""
if not isinstance(file, str):
file = file.read().decode("utf8")
self = DataFrame.__new__(DataFrame)
self._df = PyDataFrame.read_json(file)
return self
def to_arrow(self) -> pa.Table:
"""
Collect the underlying arrow arrays in an Arrow Table.
This operation is mostly zero copy.
Data types that do copy:
- CategoricalType
"""
record_batches = self._df.to_arrow()
return pa.Table.from_batches(record_batches)
def to_dict(
self, as_series: bool = True
) -> Union[Dict[str, "pl.Series"], Dict[str, tp.List[Any]]]:
"""
Convert DataFrame to a dictionary mapping column name to values.
Parameters
----------
as_series
True -> Values are series
False -> Values are List[Any]
Examples
--------
>>> df = pl.DataFrame({
>>> "A": [1, 2, 3, 4, 5],
>>> "fruits": ["banana", "banana", "apple", "apple", "banana"],
>>> "B": [5, 4, 3, 2, 1],
>>> "cars": ["beetle", "audi", "beetle", "beetle", "beetle"],
>>> "optional": [28, 300, None, 2, -30],
>>> })
shape: (5, 5)
┌─────┬──────────┬─────┬──────────┬──────────┐
│ A ┆ fruits ┆ B ┆ cars ┆ optional │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ i64 ┆ str ┆ i64 │
╞═════╪══════════╪═════╪══════════╪══════════╡
│ 1 ┆ "banana" ┆ 5 ┆ "beetle" ┆ 28 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ "banana" ┆ 4 ┆ "audi" ┆ 300 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ "apple" ┆ 3 ┆ "beetle" ┆ null │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 4 ┆ "apple" ┆ 2 ┆ "beetle" ┆ 2 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 5 ┆ "banana" ┆ 1 ┆ "beetle" ┆ -30 │
└─────┴──────────┴─────┴──────────┴──────────┘
>>> df.to_dict(as_series=False)
{'A': [1, 2, 3, 4, 5],
'fruits': ['banana', 'banana', 'apple', 'apple', 'banana'],
'B': [5, 4, 3, 2, 1],
'cars': ['beetle', 'audi', 'beetle', 'beetle', 'beetle'],
'optional': [28, 300, None, 2, -30]}
>>> df.to_dict(as_series=True)
{'A': shape: (5,)
Series: 'A' [i64]
[
1
2
3
4
5
],
'fruits': shape: (5,)
...
Series: 'optional' [i64]
[
28
300
null
2
-30
]}
"""
if as_series:
return {s.name: s for s in self}
else:
return {s.name: s.to_list() for s in self}
def to_json(
self,
file: Optional[Union[BytesIO, str, Path]] = None,
pretty: bool = False,
to_string: bool = False,
) -> Optional[str]:
"""
Serialize to JSON representation.
Parameters
----------
file
            Write to this file instead of returning a string.
        pretty
            Pretty-print the serialized JSON.
to_string
Ignore file argument and return a string.
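        Examples
        --------
        An illustrative round trip (output not shown):
        >>> df = pl.DataFrame({"foo": [1, 2, 3]})
        >>> json_string = df.to_json(to_string=True)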
"""
if to_string:
file = BytesIO()
self._df.to_json(file, pretty)
file.seek(0)
return file.read().decode("utf8")
else:
self._df.to_json(file, pretty)
return None
def to_pandas(
self, *args: Any, date_as_object: bool = False, **kwargs: Any
) -> "pd.DataFrame": # noqa: F821
"""
Cast to a Pandas DataFrame. This requires that Pandas is installed.
This operation clones data.
Parameters
----------
args
Arguments will be sent to pyarrow.Table.to_pandas.
date_as_object
Cast dates to objects. If False, convert to datetime64[ns] dtype.
kwargs
Arguments will be sent to pyarrow.Table.to_pandas.
Examples
--------
>>> import pandas
>>> df = pl.DataFrame({
"foo": [1, 2, 3],
"bar": [6, 7, 8],
"ham": ['a', 'b', 'c']
})
>>> pandas_df = df.to_pandas()
>>> type(pandas_df)
pandas.core.frame.DataFrame
"""
return self.to_arrow().to_pandas(*args, date_as_object=date_as_object, **kwargs)
def to_csv(
self,
file: Optional[Union[TextIO, str, Path]] = None,
has_headers: bool = True,
delimiter: str = ",",
) -> Optional[str]:
"""
Write Dataframe to comma-separated values file (csv).
Parameters
        ----------
file
File path to which the file should be written.
has_headers
Whether or not to include header in the CSV output.
delimiter
Separate CSV fields with this symbol.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3, 4, 5],
>>> "bar": [6, 7, 8, 9, 10],
>>> "ham": ['a', 'b', 'c', 'd','e']
>>> })
        >>> df.to_csv('new_file.csv', delimiter=',')
"""
if file is None:
buffer = BytesIO()
self._df.to_csv(buffer, has_headers, ord(delimiter))
return str(buffer.getvalue(), encoding="utf-8")
if isinstance(file, Path):
file = str(file)
self._df.to_csv(file, has_headers, ord(delimiter))
return None
def to_ipc(self, file: Union[BinaryIO, str, Path]) -> None:
"""
Write to Arrow IPC binary stream, or a feather file.
Parameters
----------
file
File path to which the file should be written.
"""
if isinstance(file, Path):
file = str(file)
self._df.to_ipc(file)
def transpose(self) -> "pl.DataFrame":
"""
Transpose a DataFrame over the diagonal.
Notes
-----
This is a very expensive operation. Perhaps you can do it differently.
Returns
-------
DataFrame
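        Examples
        --------
        An illustrative sketch (exact output formatting may differ):
        >>> df = pl.DataFrame({"a": [1, 2], "b": [3, 4]})
        >>> df.transpose()  # doctest: +SKIP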
"""
return wrap_df(self._df.transpose())
def to_parquet(
self,
file: Union[str, Path],
compression: str = "snappy",
use_pyarrow: bool = True,
**kwargs: Any,
) -> None:
"""
        Write the DataFrame to disk in parquet format.
Parameters
----------
file
File path to which the file should be written.
compression
Compression method (only supported if `use_pyarrow`).
use_pyarrow
Use C++ parquet implementation vs rust parquet implementation.
At the moment C++ supports more features.
**kwargs are passed to pyarrow.parquet.write_table
"""
if isinstance(file, Path):
file = str(file)
if use_pyarrow:
tbl = self.to_arrow()
data = {}
for i, column in enumerate(tbl):
# extract the name before casting
if column._name is None:
name = f"column_{i}"
else:
name = column._name
# parquet casts date64 to date32 for some reason
if column.type == pa.date64():
column = pa.compute.cast(column, pa.timestamp("ms", None))
data[name] = column
tbl = pa.table(data)
pa.parquet.write_table(
table=tbl, where=file, compression=compression, **kwargs
)
else:
self._df.to_parquet(file)
def to_numpy(self) -> np.ndarray:
"""
Convert DataFrame to a 2d numpy array.
This operation clones data.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> numpy_array = df.to_numpy()
>>> type(numpy_array)
numpy.ndarray
"""
return np.vstack(
[self.select_at_idx(i).to_numpy() for i in range(self.width)]
).T
def __getstate__(self): # type: ignore
return self.get_columns()
def __setstate__(self, state): # type: ignore
self._df = DataFrame(state)._df
def __mul__(self, other: Any) -> "DataFrame":
other = _prepare_other_arg(other)
return wrap_df(self._df.mul(other._s))
def __truediv__(self, other: Any) -> "DataFrame":
other = _prepare_other_arg(other)
return wrap_df(self._df.div(other._s))
def __add__(self, other: Any) -> "DataFrame":
other = _prepare_other_arg(other)
return wrap_df(self._df.add(other._s))
def __sub__(self, other: Any) -> "DataFrame":
other = _prepare_other_arg(other)
return wrap_df(self._df.sub(other._s))
def __str__(self) -> str:
return self._df.as_str()
def __repr__(self) -> str:
return self.__str__()
def __getattr__(self, item: Any) -> "PySeries":
"""
Access columns as attribute.
"""
try:
return pl.eager.series.wrap_s(self._df.column(item))
except RuntimeError:
raise AttributeError(f"{item} not found")
def __iter__(self) -> Iterator[Any]:
return self.get_columns().__iter__()
def find_idx_by_name(self, name: str) -> int:
"""
Find the index of a column by name.
Parameters
----------
name
Name of the column to find.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.find_idx_by_name("ham"))
2
"""
return self._df.find_idx_by_name(name)
def _pos_idx(self, idx: int, dim: int) -> int:
if idx >= 0:
return idx
else:
return self.shape[dim] + idx
def __getitem__(self, item: Any) -> Any:
"""
        Select rows and/or columns. The supported access patterns (expressions,
        strings, slices, masks, and row/column tuples) are described in the
        inline comments below.
"""
if hasattr(item, "_pyexpr"):
return self.select(item)
if isinstance(item, np.ndarray):
item = pl.Series("", item)
# select rows and columns at once
# every 2d selection, i.e. tuple is row column order, just like numpy
if isinstance(item, tuple):
row_selection, col_selection = item
# df[:, unknown]
if isinstance(row_selection, slice):
# multiple slices
# df[:, :]
if isinstance(col_selection, slice):
# slice can be
# by index
# [1:8]
# or by column name
# ["foo":"bar"]
# first we make sure that the slice is by index
start = col_selection.start
stop = col_selection.stop
if isinstance(col_selection.start, str):
start = self.find_idx_by_name(col_selection.start)
if isinstance(col_selection.stop, str):
stop = self.find_idx_by_name(col_selection.stop) + 1
col_selection = slice(start, stop, col_selection.step)
df = self.__getitem__(self.columns[col_selection])
return df[row_selection]
# single slice
# df[:, unknown]
series = self.__getitem__(col_selection)
# s[:]
            return pl.eager.series.wrap_s(series[row_selection])
# df[2, :] (select row as df)
if isinstance(row_selection, int):
if isinstance(col_selection, (slice, list, np.ndarray)):
df = self[:, col_selection]
return df.slice(row_selection, 1)
# df[2, "a"]
if isinstance(col_selection, str):
return self[col_selection][row_selection]
# column selection can be "a" and ["a", "b"]
if isinstance(col_selection, str):
col_selection = [col_selection]
# df[:, 1]
if isinstance(col_selection, int):
series = self.select_at_idx(col_selection)
return series[row_selection]
if isinstance(col_selection, list):
# df[:, [1, 2]]
# select by column indexes
if isinstance(col_selection[0], int):
series = [self.select_at_idx(i) for i in col_selection]
df = DataFrame(series)
return df[row_selection]
df = self.__getitem__(col_selection)
return df.__getitem__(row_selection)
# select single column
# df["foo"]
if isinstance(item, str):
return pl.eager.series.wrap_s(self._df.column(item))
# df[idx]
if isinstance(item, int):
return self.slice(self._pos_idx(item, dim=0), 1)
# df[:]
if isinstance(item, slice):
# special case df[::-1]
if item.start is None and item.stop is None and item.step == -1:
return self.select(pl.col("*").reverse()) # type: ignore
if getattr(item, "end", False):
raise ValueError("A slice with steps larger than 1 is not supported.")
if item.start is None:
start = 0
else:
start = item.start
if item.stop is None:
stop = self.height
else:
stop = item.stop
length = stop - start
if item.step is None:
# df[start:stop]
return self.slice(start, length)
else:
# df[start:stop:step]
return self.select(
pl.col("*").slice(start, length).take_every(item.step) # type: ignore
)
# select multiple columns
# df["foo", "bar"]
if isinstance(item, Sequence):
if isinstance(item[0], str):
return wrap_df(self._df.select(item))
elif isinstance(item[0], pl.Expr):
return self.select(item)
# select rows by mask or index
# df[[1, 2, 3]]
# df[true, false, true]
if isinstance(item, np.ndarray):
if item.dtype == int:
return wrap_df(self._df.take(item))
if isinstance(item[0], str):
return wrap_df(self._df.select(item))
if isinstance(item, (pl.Series, Sequence)):
if isinstance(item, Sequence):
# only bool or integers allowed
if type(item[0]) == bool:
item = pl.Series("", item)
else:
return wrap_df(
self._df.take([self._pos_idx(i, dim=0) for i in item])
)
dtype = item.dtype
if dtype == Boolean:
return wrap_df(self._df.filter(item.inner()))
if dtype == UInt32:
return wrap_df(self._df.take_with_series(item.inner()))
def __setitem__(self, key: Union[str, int, Tuple[Any, Any]], value: Any) -> None:
# df["foo"] = series
if isinstance(key, str):
try:
self.replace(key, pl.Series(key, value))
except Exception:
self.hstack([pl.Series(key, value)], in_place=True)
# df[idx] = series
elif isinstance(key, int):
assert isinstance(value, pl.Series)
self.replace_at_idx(key, value)
# df[["C", "D"]]
elif isinstance(key, list):
value = np.array(value)
if len(value.shape) != 2:
raise ValueError("can only set multiple columns with 2D matrix")
if value.shape[1] != len(key):
raise ValueError(
"matrix columns should be equal to list use to determine column names"
)
for (i, name) in enumerate(key):
self[name] = value[:, i]
# df[a, b]
elif isinstance(key, tuple):
row_selection, col_selection = key
# get series column selection
s = self.__getitem__(col_selection)
# dispatch to __setitem__ of Series to do modification
s[row_selection] = value
# now find the location to place series
# df[idx]
if isinstance(col_selection, int):
self.replace_at_idx(col_selection, s)
# df["foo"]
elif isinstance(col_selection, str):
self.replace(col_selection, s)
else:
return NotImplemented
def __len__(self) -> int:
return self.height
def _repr_html_(self) -> str:
"""
Used by jupyter notebooks to get a html table.
Output rows and columns can be modified by setting the following ENVIRONMENT variables:
* POLARS_FMT_MAX_COLS: set the number of columns
* POLARS_FMT_MAX_ROWS: set the number of rows
"""
max_cols = int(os.environ.get("POLARS_FMT_MAX_COLS", default=75))
max_rows = int(os.environ.get("POLARS_FMT_MAX_rows", 25))
return "\n".join(NotebookFormatter(self, max_cols, max_rows).render())
def rename(self, mapping: Dict[str, str]) -> "DataFrame":
"""
Rename column names.
Parameters
----------
mapping
Key value pairs that map from old name to new name.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.rename({"foo": "apple"})
╭───────┬─────┬─────╮
│ apple ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═══════╪═════╪═════╡
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" │
╰───────┴─────┴─────╯
"""
df = self.clone()
for k, v in mapping.items():
df._df.rename(k, v)
return df
def insert_at_idx(self, index: int, series: "pl.Series") -> None:
"""
Insert a Series at a certain column index. This operation is in place.
Parameters
----------
index
Column to insert the new `Series` column.
series
`Series` to insert.
"""
self._df.insert_at_idx(index, series._s)
def filter(self, predicate: "pl.Expr") -> "DataFrame":
"""
Filter the rows in the DataFrame based on a predicate expression.
Parameters
----------
predicate
Expression that evaluates to a boolean Series.
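        Examples
        --------
        An illustrative sketch (output not shown):
        >>> df = pl.DataFrame({
        >>> "foo": [1, 2, 3],
        >>> "bar": [6, 7, 8],
        >>> })
        >>> df.filter(pl.col("foo") > 1)  # doctest: +SKIP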
"""
return (
self.lazy()
.filter(predicate)
.collect(no_optimization=True, string_cache=False)
)
@property
def shape(self) -> Tuple[int, int]:
"""
Get the shape of the DataFrame.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5]})
>>> df.shape
shape: (5, 1)
"""
return self._df.shape()
@property
def height(self) -> int:
"""
Get the height of the DataFrame.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5]})
>>> df.height
5
"""
return self._df.height()
@property
def width(self) -> int:
"""
Get the width of the DataFrame.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5]})
>>> df.width
1
"""
return self._df.width()
@property
def columns(self) -> tp.List[str]:
"""
Get or set column names.
Examples
--------
>>> df = pl.DataFrame({
"foo": [1, 2, 3],
"bar": [6, 7, 8],
"ham": ['a', 'b', 'c']
})
>>> df.columns
['foo', 'bar', 'ham']
>>> # Set column names
>>> df.columns = ['apple', 'banana', 'orange']
shape: (3, 3)
╭───────┬────────┬────────╮
│ apple ┆ banana ┆ orange │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═══════╪════════╪════════╡
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" │
╰───────┴────────┴────────╯
"""
return self._df.columns()
@columns.setter
def columns(self, columns: Sequence[str]) -> None:
"""
Change the column names of the `DataFrame`.
Parameters
----------
columns
A list with new names for the `DataFrame`.
The length of the list should be equal to the width of the `DataFrame`.
"""
self._df.set_column_names(columns)
@property
def dtypes(self) -> tp.List[Type[DataType]]:
"""
Get dtypes of columns in DataFrame. Dtypes can also be found in column headers when printing the DataFrame.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6.0, 7.0, 8.0],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.dtypes
[polars.datatypes.Int64, polars.datatypes.Float64, polars.datatypes.Utf8]
>>> df
shape: (3, 3)
╭─────┬─────┬─────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" │
╰─────┴─────┴─────╯
"""
return [DTYPES[idx] for idx in self._df.dtypes()]
def describe(self) -> "DataFrame":
"""
        Summary statistics for a DataFrame. Only numeric datatypes are summarized at the moment; nulls are returned for non-numeric datatypes.
Examples
--------
>>> df = pl.DataFrame({
>>> 'a': [1.0, 2.8, 3.0],
>>> 'b': [4, 5, 6],
>>> "c": [True, False, True]
>>> })
>>> df.describe()
shape: (5, 4)
╭──────────┬───────┬─────┬──────╮
│ describe ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 ┆ f64 │
╞══════════╪═══════╪═════╪══════╡
│ "mean" ┆ 2.267 ┆ 5 ┆ null │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ "std" ┆ 1.102 ┆ 1 ┆ null │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ "min" ┆ 1 ┆ 4 ┆ 0.0 │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ "max" ┆ 3 ┆ 6 ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ "median" ┆ 2.8 ┆ 5 ┆ null │
╰──────────┴───────┴─────┴──────╯
"""
def describe_cast(self: "DataFrame") -> "DataFrame":
columns = []
for s in self:
if s.is_numeric() or s.is_boolean():
columns.append(s.cast(float))
else:
columns.append(s)
return pl.DataFrame(columns)
summary = pl.functions.concat(
[
describe_cast(self.mean()), # type: ignore
describe_cast(self.std()),
describe_cast(self.min()), # type: ignore
describe_cast(self.max()), # type: ignore
describe_cast(self.median()),
]
)
summary.insert_at_idx(
0, pl.Series("describe", ["mean", "std", "min", "max", "median"])
)
return summary
def replace_at_idx(self, index: int, series: "pl.Series") -> None:
"""
Replace a column at an index location.
Parameters
----------
index
Column index.
series
Series that will replace the column.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> x = pl.Series("apple", [10, 20, 30])
>>> df.replace_at_idx(0, x)
shape: (3, 3)
╭───────┬─────┬─────╮
│ apple ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═══════╪═════╪═════╡
│ 10 ┆ 6 ┆ "a" │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 20 ┆ 7 ┆ "b" │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 30 ┆ 8 ┆ "c" │
╰───────┴─────┴─────╯
"""
self._df.replace_at_idx(index, series._s)
def sort(
self,
by: Union[str, "pl.Expr", tp.List["pl.Expr"]],
reverse: Union[bool, tp.List[bool]] = False,
in_place: bool = False,
) -> Optional["DataFrame"]:
"""
Sort the DataFrame by column.
Parameters
----------
by
            Column(s) or expression(s) to sort by.
reverse
Reverse/descending sort.
in_place
Perform operation in-place.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6.0, 7.0, 8.0],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.sort('foo', reverse=True)
shape: (3, 3)
╭─────┬─────┬─────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str │
╞═════╪═════╪═════╡
│ 3 ┆ 8 ┆ "c" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 6 ┆ "a" │
╰─────┴─────┴─────╯
**Sort by multiple columns.**
For multiple columns we can also use expression syntax.
>>> df.sort([col("foo"), col("bar") ** 2], reverse=[True, False])
"""
if type(by) is list or isinstance(by, pl.Expr):
df = (
self.lazy()
.sort(by, reverse)
.collect(no_optimization=True, string_cache=False)
)
if in_place:
self._df = df._df
return None
return df
if in_place:
self._df.sort_in_place(by, reverse)
return None
else:
return wrap_df(self._df.sort(by, reverse))
def frame_equal(self, other: "DataFrame", null_equal: bool = False) -> bool:
"""
Check if DataFrame is equal to other.
Parameters
----------
other
DataFrame to compare with.
null_equal
Consider null values as equal.
Examples
--------
>>> df1 = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6.0, 7.0, 8.0],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df2 = pl.DataFrame({
>>> "foo": [3, 2, 1],
>>> "bar": [8.0, 7.0, 6.0],
>>> "ham": ['c', 'b', 'a']
>>> })
>>> df1.frame_equal(df1)
True
>>> df1.frame_equal(df2)
False
"""
return self._df.frame_equal(other._df, null_equal)
def replace(self, column: str, new_col: "pl.Series") -> None:
"""
Replace a column by a new Series.
Parameters
----------
column
Column to replace.
new_col
New column to insert.
"""
self._df.replace(column, new_col.inner())
def slice(self, offset: int, length: int) -> "DataFrame":
"""
Slice this DataFrame over the rows direction.
Parameters
----------
offset
Offset index.
length
Length of the slice.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6.0, 7.0, 8.0],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.slice(1, 2)
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
        │ i64 ┆ f64 ┆ str │
╞═════╪═════╪═════╡
│ 2 ┆ 7 ┆ "b" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" │
└─────┴─────┴─────┘
"""
if length < 0:
length = self.height - offset + length
return wrap_df(self._df.slice(offset, length))
def limit(self, length: int = 5) -> "DataFrame":
"""
Get first N rows as DataFrame.
See Also `DataFrame.head`
Parameters
----------
length
Amount of rows to take.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.limit(2)
shape: (2, 3)
╭─────┬─────┬─────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" │
╰─────┴─────┴─────╯
"""
return self.head(length)
def head(self, length: int = 5) -> "DataFrame":
"""
Get first N rows as DataFrame.
Parameters
----------
length
Length of the head.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3, 4, 5],
>>> "bar": [6, 7, 8, 9, 10],
>>> "ham": ['a', 'b', 'c', 'd','e']
>>> })
>>> df.head(3)
shape: (3, 3)
╭─────┬─────┬─────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" │
╰─────┴─────┴─────╯
"""
return wrap_df(self._df.head(length))
def tail(self, length: int = 5) -> "DataFrame":
"""
Get last N rows as DataFrame.
Parameters
----------
length
Length of the tail.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3, 4, 5],
>>> "bar": [6, 7, 8, 9, 10],
>>> "ham": ['a', 'b', 'c', 'd','e']
>>> })
>>> df.tail(3)
shape: (3, 3)
╭─────┬─────┬─────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 3 ┆ 8 ┆ "c" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 9 ┆ "d" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 5 ┆ 10 ┆ "e" │
╰─────┴─────┴─────╯
"""
return wrap_df(self._df.tail(length))
def drop_nulls(self, subset: Optional[tp.List[str]] = None) -> "DataFrame":
"""
Return a new DataFrame where the null values are dropped.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, None, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.drop_nulls()
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" │
└─────┴─────┴─────┘
"""
if subset is not None and isinstance(subset, str):
subset = [subset]
return wrap_df(self._df.drop_nulls(subset))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
"""
Apply a function on Self.
Parameters
----------
func
Callable.
args
Arguments.
kwargs
Keyword arguments.
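        Examples
        --------
        A small illustration (the lambda is an arbitrary placeholder):
        >>> df = pl.DataFrame({"foo": [1, 2, 3]})
        >>> df.pipe(lambda frame: frame.height)
        3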
"""
return func(self, *args, **kwargs)
def groupby(
self, by: Union[str, tp.List[str]], maintain_order: bool = False
) -> "GroupBy":
"""
Start a groupby operation.
Parameters
----------
by
Column(s) to group by.
maintain_order
Make sure that the order of the groups remains consistent. This is more expensive than a default groupby.
Note that this only works in expression aggregations.
Examples
--------
Below we group by column `"a"`, and we sum column `"b"`.
>>> df = pl.DataFrame({
>>> "a": ["a", "b", "a", "b", "b", "c"],
>>> "b": [1, 2, 3, 4, 5, 6],
>>> "c": [6, 5, 4, 3, 2, 1],
>>> })
>>> assert (
>>> df.groupby("a")["b"]
>>> .sum()
>>> .sort(by_column="a")
>>> .frame_equal(DataFrame({"a": ["a", "b", "c"], "": [4, 11, 6]})))
We can also loop over the grouped `DataFrame`
>>> for sub_df in df.groupby("a"):
>>> print(sub_df)
shape: (3, 3)
╭─────┬─────┬─────╮
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ "b" ┆ 2 ┆ 5 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 4 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 5 ┆ 2 │
╰─────┴─────┴─────╯
shape: (1, 3)
╭─────┬─────┬─────╮
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ "c" ┆ 6 ┆ 1 │
╰─────┴─────┴─────╯
shape: (2, 3)
╭─────┬─────┬─────╮
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ "a" ┆ 1 ┆ 6 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 3 ┆ 4 │
╰─────┴─────┴─────╯
"""
if isinstance(by, str):
by = [by]
return GroupBy(self._df, by, maintain_order=maintain_order, downsample=False)
def downsample(self, by: Union[str, tp.List[str]], rule: str, n: int) -> "GroupBy":
"""
Start a downsampling groupby operation.
Parameters
----------
by
Column that will be used as key in the groupby operation.
This should be a date64/date32 column.
rule
Units of the downscaling operation.
Any of:
- "month"
- "week"
- "day"
- "hour"
- "minute"
- "second"
n
Number of units (e.g. 5 "day", 15 "minute").
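Examples
--------
A hedged sketch; assumes a date64 column named "date" and a value column named "value".
>>> df.downsample("date", rule="day", n=1).agg({"value": "sum"})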
"""
return GroupBy(
self._df,
by,
maintain_order=False,
downsample=True,
rule=rule,
downsample_n=n,
)
def join(
self,
df: "DataFrame",
left_on: Optional[
Union[str, "pl.Expr", tp.List[str], tp.List["pl.Expr"]]
] = None,
right_on: Optional[
Union[str, "pl.Expr", tp.List[str], tp.List["pl.Expr"]]
] = None,
on: Optional[Union[str, tp.List[str]]] = None,
how: str = "inner",
) -> Union["DataFrame", "pl.LazyFrame"]:
"""
SQL like joins.
Parameters
----------
df
DataFrame to join with.
left_on
Name(s) of the left join column(s).
right_on
Name(s) of the right join column(s).
on
Name(s) of the join columns in both DataFrames.
how
Join strategy
- "inner"
- "left"
- "outer"
- "asof"
- "cross"
Returns
-------
Joined DataFrame
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6.0, 7.0, 8.0],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> other_df = pl.DataFrame({
>>> "apple": ['x', 'y', 'z'],
>>> "ham": ['a', 'b', 'd']
>>> })
>>> df.join(other_df, on='ham')
shape: (2, 4)
╭─────┬─────┬─────┬───────╮
│ foo ┆ bar ┆ ham ┆ apple │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str ┆ str │
╞═════╪═════╪═════╪═══════╡
│ 1 ┆ 6 ┆ "a" ┆ "x" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" ┆ "y" │
╰─────┴─────┴─────┴───────╯
>>> df.join(other_df, on='ham', how='outer')
shape: (4, 4)
╭──────┬──────┬─────┬───────╮
│ foo ┆ bar ┆ ham ┆ apple │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str ┆ str │
╞══════╪══════╪═════╪═══════╡
│ 1 ┆ 6 ┆ "a" ┆ "x" │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" ┆ "y" │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ null ┆ null ┆ "d" ┆ "z" │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" ┆ null │
╰──────┴──────┴─────┴───────╯
Asof join
=========
This is similar to a left-join except that we match on nearest key rather than equal keys.
The keys must be sorted to perform an asof join
"""
if how == "cross":
return wrap_df(self._df.join(df._df, [], [], how))
left_on_: Union[tp.List[str], tp.List[pl.Expr], None]
if isinstance(left_on, (str, pl.Expr)):
left_on_ = [left_on] # type: ignore[assignment]
else:
left_on_ = left_on
right_on_: Union[tp.List[str], tp.List[pl.Expr], None]
if isinstance(right_on, (str, pl.Expr)):
right_on_ = [right_on] # type: ignore[assignment]
else:
right_on_ = right_on
if isinstance(on, str):
left_on_ = [on]
right_on_ = [on]
elif isinstance(on, list):
left_on_ = on
right_on_ = on
if left_on_ is None or right_on_ is None:
raise ValueError("You should pass the column to join on as an argument.")
if isinstance(left_on_[0], pl.Expr) or isinstance(right_on_[0], pl.Expr):
return self.lazy().join(df.lazy(), left_on, right_on, how=how)
else:
return wrap_df(self._df.join(df._df, left_on_, right_on_, how))
def apply(
self,
f: Callable[[Tuple[Any]], Any],
return_dtype: Optional[Type[DataType]] = None,
) -> "pl.Series":
"""
Apply a custom function over the rows of the DataFrame. The rows are passed as tuples.
Beware, this is slow.
Parameters
----------
f
Custom function or lambda function.
return_dtype
Output type of the operation. If none given, Polars tries to infer the type.
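Examples
--------
Illustrative only; each row is passed to the callable as a tuple.
>>> df = pl.DataFrame({"a": [1, 2], "b": [10, 20]})
>>> df.apply(lambda row: row[0] + row[1])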
"""
return pl.eager.series.wrap_s(self._df.apply(f, return_dtype))
def with_column(self, column: Union["pl.Series", "pl.Expr"]) -> "DataFrame":
"""
Return a new DataFrame with the column added or replaced.
Parameters
----------
column
Series, where the name of the Series refers to the column in the DataFrame.
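Examples
--------
A minimal sketch; the column name is illustrative.
>>> df = pl.DataFrame({"foo": [1, 2, 3]})
>>> df.with_column(pl.Series("bar", [4, 5, 6]))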
"""
if isinstance(column, pl.Expr):
return self.with_columns([column])
else:
return wrap_df(self._df.with_column(column._s))
def hstack(
self, columns: Union[tp.List["pl.Series"], "DataFrame"], in_place: bool = False
) -> Optional["DataFrame"]:
"""
Return a new DataFrame grown horizontally by stacking multiple Series to it.
Parameters
----------
columns
Series to stack.
in_place
Modify in place.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> x = pl.Series("apple", [10, 20, 30])
>>> df.hstack([x])
shape: (3, 4)
╭─────┬─────┬─────┬───────╮
│ foo ┆ bar ┆ ham ┆ apple │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str ┆ i64 │
╞═════╪═════╪═════╪═══════╡
│ 1 ┆ 6 ┆ "a" ┆ 10 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" ┆ 20 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" ┆ 30 │
╰─────┴─────┴─────┴───────╯
"""
if not isinstance(columns, list):
columns = columns.get_columns()
if in_place:
self._df.hstack_mut([s.inner() for s in columns])
return None
else:
return wrap_df(self._df.hstack([s.inner() for s in columns]))
def vstack(self, df: "DataFrame", in_place: bool = False) -> Optional["DataFrame"]:
"""
Grow this DataFrame vertically by stacking a DataFrame to it.
Parameters
----------
df
DataFrame to stack.
in_place
Modify in place
Examples
--------
>>> df1 = pl.DataFrame({
>>> "foo": [1, 2],
>>> "bar": [6, 7],
>>> "ham": ['a', 'b']
>>> })
>>> df2 = pl.DataFrame({
>>> "foo": [3, 4],
>>> "bar": [8 , 9],
>>> "ham": ['c', 'd']
>>> })
>>> df1.vstack(df2)
shape: (4, 3)
╭─────┬─────┬─────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 9 ┆ "d" │
╰─────┴─────┴─────╯
"""
if in_place:
self._df.vstack_mut(df._df)
return None
else:
return wrap_df(self._df.vstack(df._df))
def drop(self, name: Union[str, tp.List[str]]) -> "DataFrame":
"""
Remove column from DataFrame and return as new.
Parameters
----------
name
Column(s) to drop.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6.0, 7.0, 8.0],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.drop('ham')
shape: (3, 2)
╭─────┬─────╮
│ foo ┆ bar │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 1 ┆ 6 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 │
╰─────┴─────╯
"""
if isinstance(name, list):
df = self.clone()
for name in name:
df._df.drop_in_place(name)
return df
return wrap_df(self._df.drop(name))
def drop_in_place(self, name: str) -> "pl.Series":
"""
Drop in place.
Parameters
----------
name
Column to drop.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.drop_in_place("ham")
shape: (3, 2)
╭─────┬─────╮
│ foo ┆ bar │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 6 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 │
╰─────┴─────╯
"""
return pl.eager.series.wrap_s(self._df.drop_in_place(name))
def select_at_idx(self, idx: int) -> "pl.Series":
"""
Select column at index location.
Parameters
----------
idx
Location of selection.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.select_at_idx(1)
shape: (3,)
Series: 'bar' [i64]
[
6
7
8
]
"""
return pl.eager.series.wrap_s(self._df.select_at_idx(idx))
def clone(self) -> "DataFrame":
"""
Very cheap deep clone.
"""
return wrap_df(self._df.clone())
def __copy__(self) -> "DataFrame":
return self.clone()
def __deepcopy__(self, memodict={}) -> "DataFrame": # type: ignore
return self.clone()
def get_columns(self) -> tp.List["pl.Series"]:
"""
Get the DataFrame as a List of Series.
"""
return list(map(lambda s: pl.eager.series.wrap_s(s), self._df.get_columns()))
def fill_null(self, strategy: Union[str, "pl.Expr"]) -> "DataFrame":
"""
Fill None/missing values by a filling strategy or an Expression evaluation.
Parameters
----------
strategy
One of:
- "backward"
- "forward"
- "mean"
- "min'
- "max"
- "zero"
- "one"
Or an expression.
Returns
-------
DataFrame with None replaced with the filling strategy.
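Examples
--------
Illustrative sketch; both a strategy string and a literal value are accepted.
>>> df = pl.DataFrame({"a": [1, None, 3], "b": [4, 5, None]})
>>> df.fill_null("forward")
>>> df.fill_null(0)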
"""
if isinstance(strategy, pl.Expr):
return self.lazy().fill_null(strategy).collect(no_optimization=True)
if not isinstance(strategy, str):
return self.fill_null(pl.lit(strategy))
return wrap_df(self._df.fill_null(strategy))
def fill_nan(self, fill_value: "pl.Expr") -> "DataFrame":
"""
Fill floating point NaN values by an Expression evaluation.
Warnings
--------
Note that floating point NaN (Not a Number) values are not missing values!
To replace missing values, use `fill_null`.
Parameters
----------
fill_value
value to fill NaN with
Returns
-------
DataFrame with NaN replaced with fill_value
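Examples
--------
A hedged sketch; the fill value is passed as an expression.
>>> df = pl.DataFrame({"a": [1.0, float("nan"), 3.0]})
>>> df.fill_nan(pl.lit(0.0))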
"""
return self.lazy().fill_nan(fill_value).collect(no_optimization=True)
def explode(
self, columns: Union[str, tp.List[str], "pl.Expr", tp.List["pl.Expr"]]
) -> "DataFrame":
"""
Explode `DataFrame` to long format by exploding a column with Lists.
Parameters
----------
columns
Column of LargeList type.
Returns
-------
DataFrame
Examples
--------
>>> df = pl.DataFrame({
>>> "letters": ["c", "c", "a", "c", "a", "b"],
>>> "nrs": [[1, 2], [1, 3], [4, 3], [5, 5, 5], [6], [2, 1, 2]]
>>> })
>>> df
shape: (6, 2)
╭─────────┬────────────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ list [i64] │
╞═════════╪════════════╡
│ "c" ┆ [1, 2] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "c" ┆ [1, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "a" ┆ [4, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "c" ┆ [5, 5, 5] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "a" ┆ [6] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "b" ┆ [2, 1, 2] │
╰─────────┴────────────╯
>>> df.explode("nrs")
shape: (13, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ ... ┆ ... │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 2 │
╰─────────┴─────╯
"""
return self.lazy().explode(columns).collect(no_optimization=True)
def melt(
self, id_vars: Union[tp.List[str], str], value_vars: Union[tp.List[str], str]
) -> "DataFrame":
"""
Unpivot DataFrame to long format.
Parameters
----------
id_vars
Columns to use as identifier variables.
value_vars
Column(s) to use as value variables; these are unpivoted into rows.
Returns
-------
DataFrame in long (melted) format.
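Examples
--------
Illustrative sketch with hypothetical column names.
>>> df = pl.DataFrame({"key": ["x", "y"], "a": [1, 2], "b": [3, 4]})
>>> df.melt(id_vars="key", value_vars=["a", "b"])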
"""
if isinstance(value_vars, str):
value_vars = [value_vars]
if isinstance(id_vars, str):
id_vars = [id_vars]
return wrap_df(self._df.melt(id_vars, value_vars))
def shift(self, periods: int) -> "DataFrame":
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with `Nones`.
Parameters
----------
periods
Number of places to shift (may be negative).
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.shift(periods=1)
shape: (3, 3)
┌──────┬──────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞══════╪══════╪══════╡
│ null ┆ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" │
└──────┴──────┴──────┘
>>> df.shift(periods=-1)
shape: (3, 3)
┌──────┬──────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞══════╪══════╪══════╡
│ 2 ┆ 7 ┆ "b" │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null ┆ null │
└──────┴──────┴──────┘
"""
return wrap_df(self._df.shift(periods))
def shift_and_fill(
self, periods: int, fill_value: Union[int, str, float]
) -> "DataFrame":
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with the result of the `fill_value` expression.
Parameters
----------
periods
Number of places to shift (may be negative).
fill_value
fill None values with this value.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.shift_and_fill(periods=1, fill_value=0)
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 0 ┆ 0 ┆ "0" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ "b" │
└─────┴─────┴─────┘
"""
return (
self.lazy()
.shift_and_fill(periods, fill_value)
.collect(no_optimization=True, string_cache=False)
)
def is_duplicated(self) -> "pl.Series":
"""
Get a mask of all duplicated rows in this DataFrame.
"""
return pl.eager.series.wrap_s(self._df.is_duplicated())
def is_unique(self) -> "pl.Series":
"""
Get a mask of all unique rows in this DataFrame.
"""
return pl.eager.series.wrap_s(self._df.is_unique())
def lazy(self) -> "pl.LazyFrame":
"""
Start a lazy query from this point. This returns a `LazyFrame` object.
Operations on a `LazyFrame` are not executed until this is requested by either calling:
* `.fetch()` (run on a small number of rows)
* `.collect()` (run on all data)
* `.describe_plan()` (print unoptimized query plan)
* `.describe_optimized_plan()` (print optimized query plan)
* `.show_graph()` (show the (un)optimized query plan as a graphviz graph)
Lazy operations are advised because they allow for query optimization and more parallelization.
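Examples
--------
A hedged sketch of a typical lazy query.
>>> df = pl.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> (df.lazy()
>>> .filter(pl.col("a") > 1)
>>> .select([pl.col("b").sum()])
>>> .collect())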
"""
return pl.lazy.frame.wrap_ldf(self._df.lazy())
def select(
self, exprs: Union[str, "pl.Expr", Sequence[str], Sequence["pl.Expr"]]
) -> "DataFrame":
"""
Select columns from this DataFrame.
Parameters
----------
exprs
Column or columns to select.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.select('foo')
shape: (3, 1)
┌─────┐
│ foo │
│ --- │
│ i64 │
╞═════╡
│ 1 │
├╌╌╌╌╌┤
│ 2 │
├╌╌╌╌╌┤
│ 3 │
└─────┘
"""
return (
self.lazy().select(exprs).collect(no_optimization=True, string_cache=False)
)
def with_columns(self, exprs: Union["pl.Expr", tp.List["pl.Expr"]]) -> "DataFrame":
"""
Add or overwrite multiple columns in a DataFrame.
Parameters
----------
exprs
List of Expressions that evaluate to columns.
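Examples
--------
A minimal sketch; the expressions and aliases are illustrative.
>>> df = pl.DataFrame({"a": [1, 2], "b": [3, 4]})
>>> df.with_columns([
>>> (pl.col("a") * 2).alias("a_doubled"),
>>> (pl.col("a") + pl.col("b")).alias("a_plus_b"),
>>> ])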
"""
if not isinstance(exprs, list):
exprs = [exprs]
return (
self.lazy()
.with_columns(exprs)
.collect(no_optimization=True, string_cache=False)
)
def n_chunks(self) -> int:
"""
Get number of chunks used by the ChunkedArrays of this DataFrame.
"""
return self._df.n_chunks()
def max(self, axis: int = 0) -> Union["DataFrame", "pl.Series"]:
"""
Aggregate the columns of this DataFrame to their maximum value.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.max()
shape: (1, 3)
╭─────┬─────┬──────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 3 ┆ 8 ┆ null │
╰─────┴─────┴──────╯
"""
if axis == 0:
return wrap_df(self._df.max())
if axis == 1:
return pl.eager.series.wrap_s(self._df.hmax())
raise ValueError("Axis should be 0 or 1.")
def min(self, axis: int = 0) -> Union["DataFrame", "pl.Series"]:
"""
Aggregate the columns of this DataFrame to their minimum value.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.min()
shape: (1, 3)
╭─────┬─────┬──────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 1 ┆ 6 ┆ null │
╰─────┴─────┴──────╯
"""
if axis == 0:
return wrap_df(self._df.min())
if axis == 1:
return pl.eager.series.wrap_s(self._df.hmin())
raise ValueError("Axis should be 0 or 1.")
def sum(
self, axis: int = 0, null_strategy: str = "ignore"
) -> Union["DataFrame", "pl.Series"]:
"""
Aggregate the columns of this DataFrame to their sum value.
Parameters
----------
axis
either 0 or 1
null_strategy
{'ignore', 'propagate'}
this argument is only used if axis == 1
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.sum()
shape: (1, 3)
╭─────┬─────┬──────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 6 ┆ 21 ┆ null │
╰─────┴─────┴──────╯
"""
if axis == 0:
return wrap_df(self._df.sum())
if axis == 1:
return pl.eager.series.wrap_s(self._df.hsum(null_strategy))
raise ValueError("Axis should be 0 or 1.")
def mean(
self, axis: int = 0, null_strategy: str = "ignore"
) -> Union["DataFrame", "pl.Series"]:
"""
Aggregate the columns of this DataFrame to their mean value.
Parameters
----------
axis
either 0 or 1
null_strategy
{'ignore', 'propagate'}
this argument is only used if axis == 1
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.mean()
shape: (1, 3)
╭─────┬─────┬──────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 2 ┆ 7 ┆ null │
╰─────┴─────┴──────╯
"""
if axis == 0:
return wrap_df(self._df.mean())
if axis == 1:
return pl.eager.series.wrap_s(self._df.hmean(null_strategy))
raise ValueError("Axis should be 0 or 1.")
def std(self) -> "DataFrame":
"""
Aggregate the columns of this DataFrame to their standard deviation value.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.std()
shape: (1, 3)
╭─────┬─────┬──────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 1 ┆ 1 ┆ null │
╰─────┴─────┴──────╯
"""
return wrap_df(self._df.std())
def var(self) -> "DataFrame":
"""
Aggregate the columns of this DataFrame to their variance value.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.var()
shape: (1, 3)
╭─────┬─────┬──────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 1 ┆ 1 ┆ null │
╰─────┴─────┴──────╯
"""
return wrap_df(self._df.var())
def median(self) -> "DataFrame":
"""
Aggregate the columns of this DataFrame to their median value.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.median()
shape: (1, 3)
╭─────┬─────┬──────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 2 ┆ 7 ┆ null │
╰─────┴─────┴──────╯
"""
return wrap_df(self._df.median())
def quantile(self, quantile: float) -> "DataFrame":
"""
Aggregate the columns of this DataFrame to their quantile value.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.quantile(0.5)
shape: (1, 3)
╭─────┬─────┬──────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 2 ┆ 7 ┆ null │
╰─────┴─────┴──────╯
"""
return wrap_df(self._df.quantile(quantile))
def to_dummies(self) -> "DataFrame":
"""
Get one hot encoded dummy variables.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.to_dummies()
shape: (3, 9)
╭───────┬───────┬───────┬───────┬─────┬───────┬───────┬───────┬───────╮
│ foo_1 ┆ foo_2 ┆ foo_3 ┆ bar_6 ┆ ... ┆ bar_8 ┆ ham_a ┆ ham_b ┆ ham_c │
│ --- ┆ --- ┆ --- ┆ --- ┆ ┆ --- ┆ --- ┆ --- ┆ --- │
│ u8 ┆ u8 ┆ u8 ┆ u8 ┆ ┆ u8 ┆ u8 ┆ u8 ┆ u8 │
╞═══════╪═══════╪═══════╪═══════╪═════╪═══════╪═══════╪═══════╪═══════╡
│ 1 ┆ 0 ┆ 0 ┆ 1 ┆ ... ┆ 0 ┆ 1 ┆ 0 ┆ 0 │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 0 ┆ 1 ┆ 0 ┆ 0 ┆ ... ┆ 0 ┆ 0 ┆ 1 ┆ 0 │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 0 ┆ 0 ┆ 1 ┆ 0 ┆ ... ┆ 1 ┆ 0 ┆ 0 ┆ 1 │
╰───────┴───────┴───────┴───────┴─────┴───────┴───────┴───────┴───────╯
"""
return wrap_df(self._df.to_dummies())
def drop_duplicates(
self,
maintain_order: bool = True,
subset: Optional[Union[str, tp.List[str]]] = None,
) -> "DataFrame":
"""
Drop duplicate rows from this DataFrame.
Note that this fails if there is a column of type `List` in the DataFrame.
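Examples
--------
Illustrative sketch; `subset` restricts which columns are used to detect duplicates.
>>> df = pl.DataFrame({"a": [1, 1, 2], "b": ["x", "x", "y"]})
>>> df.drop_duplicates()
>>> df.drop_duplicates(subset="a")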
"""
if subset is not None and not isinstance(subset, list):
subset = [subset]
return wrap_df(self._df.drop_duplicates(maintain_order, subset))
def rechunk(self) -> "DataFrame":
"""
Rechunk the data in this DataFrame to a contiguous allocation.
This will make sure all subsequent operations have optimal and predictable performance.
"""
return wrap_df(self._df.rechunk())
def null_count(self) -> "DataFrame":
"""
Create a new DataFrame that shows the null counts per column.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, None, 3],
>>> "bar": [6, 7, None],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.null_count()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ u32 ┆ u32 ┆ u32 │
╞═════╪═════╪═════╡
│ 1 ┆ 1 ┆ 0 │
└─────┴─────┴─────┘
"""
return wrap_df(self._df.null_count())
def sample(
self,
n: Optional[int] = None,
frac: Optional[float] = None,
with_replacement: bool = False,
) -> "DataFrame":
"""
Sample from this DataFrame by setting either `n` or `frac`.
Parameters
----------
n
Number of samples < self.len() .
frac
Fraction between 0.0 and 1.0 .
with_replacement
Sample with replacement.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.sample(n=2)
shape: (2, 3)
╭─────┬─────┬─────╮
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" │
╰─────┴─────┴─────╯
"""
if n is not None:
return wrap_df(self._df.sample_n(n, with_replacement))
return wrap_df(self._df.sample_frac(frac, with_replacement))
def fold(
self, operation: Callable[["pl.Series", "pl.Series"], "pl.Series"]
) -> "pl.Series":
"""
Apply a horizontal reduction on a DataFrame. This can be used to effectively
determine aggregations on a row level, and can be applied to any DataType that
can be supercasted (casted to a similar parent type).
Examples of the supercast rules when applying an arithmetic operation on two DataTypes:
Int8 + Utf8 = Utf8
Float32 + Int64 = Float32
Float32 + Float64 = Float64
Examples
--------
>>> # A horizontal sum operation
>>> df = pl.DataFrame(
>>> {"a": [2, 1, 3],
>>> "b": [1, 2, 3],
>>> "c": [1.0, 2.0, 3.0]
>>> })
>>> df.fold(lambda s1, s2: s1 + s2)
Series: 'a' [f64]
[
4
5
9
]
>>> # A horizontal minimum operation
>>> df = pl.DataFrame(
>>> {"a": [2, 1, 3],
>>> "b": [1, 2, 3],
>>> "c": [1.0, 2.0, 3.0]
>>> })
>>> df.fold(lambda s1, s2: s1.zip_with(s1 < s2, s2))
Series: 'a' [f64]
[
1
1
3
]
>>> # A horizontal string concatenation
>>> df = pl.DataFrame(
>>> {"a": ["foo", "bar", 2],
>>> "b": [1, 2, 3],
>>> "c": [1.0, 2.0, 3.0]
>>> })
>>> df.fold(lambda s1, s2: s1 + s2)
Series: '' [f64]
[
"foo11"
"bar22
"233"
]
Parameters
----------
operation
function that takes two `Series` and returns a `Series`.
"""
if self.width == 1:
return self.select_at_idx(0)
df = self
acc = operation(df.select_at_idx(0), df.select_at_idx(1))
for i in range(2, df.width):
acc = operation(acc, df.select_at_idx(i))
return acc
def row(self, index: int) -> Tuple[Any]:
"""
Get a row as tuple.
Parameters
----------
index
Row index.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.row(2)
(3, 8, 'c')
"""
return self._df.row_tuple(index)
def rows(self) -> tp.List[Tuple[Any]]:
"""
Convert columnar data to rows as python tuples.
"""
return self._df.row_tuples()
def shrink_to_fit(self, in_place: bool = False) -> Optional["DataFrame"]:
"""
Shrink memory usage of this DataFrame to fit the exact capacity needed to hold the data.
"""
if in_place:
self._df.shrink_to_fit()
return None
else:
df = self.clone()
df._df.shrink_to_fit()
return df
def hash_rows(
self, k0: int = 0, k1: int = 1, k2: int = 2, k3: int = 3
) -> "pl.Series":
"""
Hash and combine the rows in this DataFrame.
Hash value is UInt64
Parameters
----------
k0
seed parameter
k1
seed parameter
k2
seed parameter
k3
seed parameter
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.hash_rows(k0=42)
shape: (3,)
Series: '' [u64]
[
1208206736888326229
8040480609798856146
18282897888575762835
]
"""
return pl.eager.series.wrap_s(self._df.hash_rows(k0, k1, k2, k3))
def interpolate(self) -> "DataFrame":
"""
Interpolate intermediate values. The interpolation method is linear.
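Examples
--------
A hedged sketch; intermediate null values are filled linearly.
>>> pl.DataFrame({"a": [1, None, 3]}).interpolate()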
"""
return self.select(pl.col("*").interpolate()) # type: ignore
class GroupBy:
"""
Starts a new GroupBy operation.
You can also loop over this Object to loop over `DataFrames` with unique groups.
Examples
--------
>>> for group in df.groupby("foo"):
>>> print(group)
"""
def __init__(
self,
df: "PyDataFrame",
by: Union[str, tp.List[str]],
maintain_order: bool = False,
downsample: bool = False,
rule: Optional[str] = None,
downsample_n: int = 0,
):
self._df = df
self.by = by
self.maintain_order = maintain_order
self.downsample = downsample
self.rule = rule
self.downsample_n = downsample_n
def __getitem__(self, item: Any) -> "GBSelection":
return self.select(item)
def __iter__(self) -> Iterable[Any]:
groups_df = self.groups()
groups = groups_df["groups"]
df = wrap_df(self._df)
for i in range(groups_df.height):
yield df[groups[i]]
def get_group(self, group_value: Union[Any, Tuple[Any]]) -> DataFrame:
"""
Select a single group as a new DataFrame.
Parameters
----------
group_value
Group to select.
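Examples
--------
A hypothetical sketch; the group value must occur in the groupby key column.
>>> df = pl.DataFrame({"letters": ["a", "b", "a"], "nrs": [1, 2, 3]})
>>> df.groupby("letters").get_group("a")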
"""
groups_df = self.groups()
groups = groups_df["groups"]
if not isinstance(group_value, list):
group_value = [group_value]
by = self.by
if not isinstance(by, list):
by = [by]
mask = None
for column, group_val in zip(by, group_value):
local_mask = groups_df[column] == group_val
if mask is None:
mask = local_mask
else:
mask = mask & local_mask
# should be only one match
try:
groups_idx = groups[mask][0]
except IndexError:
raise ValueError(f"no group: {group_value} found")
df = wrap_df(self._df)
return df[groups_idx]
def groups(self) -> DataFrame:
"""
Return a `DataFrame` with:
* the groupby keys
* the group indexes aggregated as lists
"""
return wrap_df(self._df.groupby(self.by, None, "groups"))
def apply(self, f: Callable[[DataFrame], DataFrame]) -> DataFrame:
"""
Apply a function over the groups as a sub-DataFrame.
Parameters
----------
f
Custom function.
Returns
-------
DataFrame
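Examples
--------
A hedged sketch; the callable receives each group as a sub-DataFrame.
>>> df.groupby("letters").apply(lambda group_df: group_df.head(1))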
"""
return wrap_df(self._df.groupby_apply(self.by, f))
def agg(
self,
column_to_agg: Union[
tp.List[Tuple[str, tp.List[str]]],
Dict[str, Union[str, tp.List[str]]],
tp.List["pl.Expr"],
"pl.Expr",
],
) -> DataFrame:
"""
Use multiple aggregations on columns. This can be combined with complete lazy API
and is considered idiomatic polars.
Parameters
----------
column_to_agg
map column to aggregation functions.
Examples:
## use lazy API syntax (recommended)
[col("foo").sum(), col("bar").min()]
## column name to aggregation with tuples:
[("foo", ["sum", "n_unique", "min"]),
("bar": ["max"])]
## column name to aggregation with dict:
{"foo": ["sum", "n_unique", "min"],
"bar": "max" }
Returns
-------
Result of groupby split apply operations.
Examples
--------
>>> # use lazy API
>>> (df.groupby(["foo", "bar"])
>>> .agg([pl.sum("ham"), col("spam").tail(4).sum()]))
>>> # use a dict
>>> (df.groupby(["foo", "bar"])
>>> .agg({"spam": ["sum", "min"]}))
"""
if isinstance(column_to_agg, pl.Expr):
column_to_agg = [column_to_agg]
if isinstance(column_to_agg, dict):
column_to_agg = [
(column, [agg] if isinstance(agg, str) else agg)
for (column, agg) in column_to_agg.items()
]
elif isinstance(column_to_agg, list):
if isinstance(column_to_agg[0], tuple):
column_to_agg = [ # type: ignore[misc]
(column, [agg] if isinstance(agg, str) else agg) # type: ignore[misc]
for (column, agg) in column_to_agg
]
elif isinstance(column_to_agg[0], pl.Expr):
return (
wrap_df(self._df)
.lazy()
.groupby(self.by, maintain_order=self.maintain_order)
.agg(column_to_agg) # type: ignore[arg-type]
.collect(no_optimization=True, string_cache=False)
)
pass
else:
raise ValueError(
f"argument: {column_to_agg} not understood, have you passed a list of expressions?"
)
else:
raise ValueError(
f"argument: {column_to_agg} not understood, have you passed a list of expressions?"
)
if self.downsample:
return wrap_df(
self._df.downsample_agg(
self.by, self.rule, self.downsample_n, column_to_agg
)
)
return wrap_df(self._df.groupby_agg(self.by, column_to_agg))
def head(self, n: int = 5) -> DataFrame:
"""
Return first n rows of each group.
Parameters
----------
n
Number of values of the group to select
Examples
--------
>>> df = pl.DataFrame({
>>> "letters": ["c", "c", "a", "c", "a", "b"],
>>> "nrs": [1, 2, 3, 4, 5, 6]
>>> })
>>> df
shape: (6, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 4 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
╰─────────┴─────╯
>>> (df.groupby("letters")
>>> .head(2)
>>> .sort("letters")
>>> )
shape: (5, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
╰─────────┴─────╯
"""
return (
wrap_df(self._df)
.lazy()
.groupby(self.by, self.maintain_order)
.head(n) # type: ignore[arg-type]
.collect(no_optimization=True, string_cache=False)
)
def tail(self, n: int = 5) -> DataFrame:
"""
Return last n rows of each group.
Parameters
----------
n
Number of values of the group to select
Examples
--------
>>> df = pl.DataFrame({
>>> "letters": ["c", "c", "a", "c", "a", "b"],
>>> "nrs": [1, 2, 3, 4, 5, 6]
>>> })
>>> df
shape: (6, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 4 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
╰─────────┴─────╯
>>> (df.groupby("letters")
>>> .tail(2)
>>> .sort("letters")
>>> )
shape: (5, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 4 │
╰─────────┴─────╯
"""
return (
wrap_df(self._df)
.lazy()
.groupby(self.by, self.maintain_order)
.tail(n) # type: ignore[arg-type]
.collect(no_optimization=True, string_cache=False)
)
def select(self, columns: Union[str, tp.List[str]]) -> "GBSelection":
"""
.. deprecated:: 0.8.16
Use `groupby.agg(col("selection"))` instead
Select the columns that will be aggregated.
Parameters
----------
columns
One or multiple columns.
"""
if self.downsample:
raise ValueError("select not supported in downsample operation")
if isinstance(columns, str):
columns = [columns]
return GBSelection(self._df, self.by, columns)
def select_all(self) -> "GBSelection":
"""
.. deprecated:: 0.8.16
Use `groupby.agg(col("*"))` instead
Select all columns for aggregation.
"""
return GBSelection(
self._df, self.by, None, self.downsample, self.rule, self.downsample_n
)
def pivot(self, pivot_column: str, values_column: str) -> "PivotOps":
"""
Do a pivot operation based on the group key, a pivot column and an aggregation function on the values column.
Parameters
----------
pivot_column
Column to pivot.
values_column
Column that will be aggregated.
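Examples
--------
Hypothetical column names for illustration.
>>> df.groupby("letters").pivot(pivot_column="year", values_column="nrs").sum()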
"""
if self.downsample:
raise ValueError("Pivot not supported in downsample operation.")
return PivotOps(self._df, self.by, pivot_column, values_column)
def first(self) -> DataFrame:
"""
Aggregate the first values in the group.
"""
return self.select_all().first()
def last(self) -> DataFrame:
"""
Aggregate the last values in the group.
"""
return self.select_all().last()
def sum(self) -> DataFrame:
"""
Reduce the groups to the sum.
"""
return self.select_all().sum()
def min(self) -> DataFrame:
"""
Reduce the groups to the minimal value.
"""
return self.select_all().min()
def max(self) -> DataFrame:
"""
Reduce the groups to the maximal value.
"""
return self.select_all().max()
def count(self) -> DataFrame:
"""
Count the number of values in each group.
"""
return self.select_all().count()
def mean(self) -> DataFrame:
"""
Reduce the groups to the mean values.
"""
return self.select_all().mean()
def n_unique(self) -> DataFrame:
"""
Count the unique values per group.
"""
return self.select_all().n_unique()
def quantile(self, quantile: float) -> DataFrame:
"""
Compute the quantile per group.
"""
return self.select_all().quantile(quantile)
def median(self) -> DataFrame:
"""
Return the median per group.
"""
return self.select_all().median()
def agg_list(self) -> DataFrame:
"""
Aggregate the groups into Series.
"""
return self.select_all().agg_list()
class PivotOps:
"""
Utility class returned in a pivot operation.
"""
def __init__(
self,
df: DataFrame,
by: Union[str, tp.List[str]],
pivot_column: str,
values_column: str,
):
self._df = df
self.by = by
self.pivot_column = pivot_column
self.values_column = values_column
def first(self) -> DataFrame:
"""
Get the first value per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "first")
)
def sum(self) -> DataFrame:
"""
Get the sum per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "sum")
)
def min(self) -> DataFrame:
"""
Get the minimal value per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "min")
)
def max(self) -> DataFrame:
"""
Get the maximal value per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "max")
)
def mean(self) -> DataFrame:
"""
Get the mean value per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "mean")
)
def count(self) -> DataFrame:
"""
Count the values per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "count")
)
def median(self) -> DataFrame:
"""
Get the median value per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "median")
)
class GBSelection:
"""
Utility class returned in a groupby operation.
"""
def __init__(
self,
df: "PyDataFrame",
by: Union[str, tp.List[str]],
selection: Optional[tp.List[str]],
downsample: bool = False,
rule: Optional[str] = None,
downsample_n: int = 0,
):
self._df = df
self.by = by
self.selection = selection
self.downsample = downsample
self.rule = rule
self.n = downsample_n
def first(self) -> DataFrame:
"""
Aggregate the first values in the group.
"""
if self.downsample:
return wrap_df(self._df.downsample(self.by, self.rule, self.n, "first"))
return wrap_df(self._df.groupby(self.by, self.selection, "first"))
def last(self) -> DataFrame:
"""
Aggregate the last values in the group.
"""
if self.downsample:
return wrap_df(self._df.downsample(self.by, self.rule, self.n, "last"))
return wrap_df(self._df.groupby(self.by, self.selection, "last"))
def sum(self) -> DataFrame:
"""
Reduce the groups to the sum.
"""
if self.downsample:
return wrap_df(self._df.downsample(self.by, self.rule, self.n, "sum"))
return wrap_df(self._df.groupby(self.by, self.selection, "sum"))
def min(self) -> DataFrame:
"""
Reduce the groups to the minimal value.
"""
if self.downsample:
return wrap_df(self._df.downsample(self.by, self.rule, self.n, "min"))
return wrap_df(self._df.groupby(self.by, self.selection, "min"))
def max(self) -> DataFrame:
"""
Reduce the groups to the maximal value.
"""
if self.downsample:
return wrap_df(self._df.downsample(self.by, self.rule, self.n, "max"))
return wrap_df(self._df.groupby(self.by, self.selection, "max"))
def count(self) -> DataFrame:
"""
Count the number of values in each group.
"""
if self.downsample:
return wrap_df(self._df.downsample(self.by, self.rule, self.n, "count"))
return wrap_df(self._df.groupby(self.by, self.selection, "count"))
def mean(self) -> DataFrame:
"""
Reduce the groups to the mean values.
"""
if self.downsample:
return wrap_df(self._df.downsample(self.by, self.rule, self.n, "mean"))
return wrap_df(self._df.groupby(self.by, self.selection, "mean"))
def n_unique(self) -> DataFrame:
"""
Count the unique values per group.
"""
if self.downsample:
return wrap_df(self._df.downsample(self.by, self.rule, self.n, "n_unique"))
return wrap_df(self._df.groupby(self.by, self.selection, "n_unique"))
def quantile(self, quantile: float) -> DataFrame:
"""
Compute the quantile per group.
"""
if self.downsample:
raise ValueError("quantile operation not supported during downsample")
return wrap_df(self._df.groupby_quantile(self.by, self.selection, quantile))
def median(self) -> DataFrame:
"""
Return the median per group.
"""
if self.downsample:
return wrap_df(self._df.downsample(self.by, self.rule, self.n, "median"))
return wrap_df(self._df.groupby(self.by, self.selection, "median"))
def agg_list(self) -> DataFrame:
"""
Aggregate the groups into Series.
"""
if self.downsample:
return wrap_df(self._df.downsample(self.by, self.rule, self.n, "agg_list"))
return wrap_df(self._df.groupby(self.by, self.selection, "agg_list"))
def apply(
self,
func: Callable[[Any], Any],
return_dtype: Optional[Type[DataType]] = None,
) -> DataFrame:
"""
Apply a function over the groups.
"""
df = self.agg_list()
if self.selection is None:
raise TypeError(
"apply not available for Groupby.select_all(). Use select() instead."
)
for name in self.selection:
s = df.drop_in_place(name + "_agg_list").apply(func, return_dtype)
s.rename(name, in_place=True)
df[name] = s
return df
|
the-stack_106_17803
|
class Part1:
def __init__(self):
with open("day16.txt") as f:
self.data = [i.strip() for i in f.readlines()]
self.constraints = {}
self.valid = {}
for line in self.data:
if "your ticket" in line:
break
parts = line.split(':')
if len(parts) == 2:
#print(parts)
key = parts[0].strip()
valid = set()
valids = parts[1].split('or')
for v in valids:
bits = [int(i) for i in v.split('-')]
for i in range(bits[0], bits[1]+1):
valid.add(i)
self.valid[i] = 1
self.constraints[key] = valid
section = 1
self.tickets = []
for line in self.data:
if not line:
continue
if "your ticket" in line:
section = 2
continue
if "nearby tickets" in line:
section = 3
continue
if section == 2:
bits = line.split(',')
self.your_ticket = [int(i) for i in line.split(',')]
if section == 3:
t = [int(i) for i in line.split(',')]
self.tickets.append(t)
def run(self):
total = 0
#print (self.valid)
#print(self.tickets)
for t in self.tickets:
for n in t:
if n not in self.valid:
total += n
print(total)
def run2(self):
self.valid_tickets = []
for t in self.tickets:
is_valid = True
for n in t:
if n not in self.valid:
is_valid = False
if is_valid:
self.valid_tickets.append(t)
# Look at each field
ranges = []
for i in range(len(self.tickets[0])):
nums = set()
for t in self.valid_tickets:
nums.add(t[i])
ranges.append(nums)
print(ranges)
candidates = []
for nums in ranges:
candidate = []
for k, v in self.constraints.items():
if not (nums - v):
candidate.append(k)
candidates.append(candidate)
print(candidates)
while True:
removed = False
for c in candidates:
if len(c) == 1:
remove = c[0]
for d in candidates:
if remove in d:
if len(d) > 1:
d.remove(remove)
removed = True
if not removed:
break
print(candidates)
for c in candidates:
print(len(c))
print (len(self.tickets[0]))
print (len(self.valid_tickets))
dep_fields = []
for i, c in enumerate(candidates):
print (i,c)
if "departure" in c[0]:
dep_fields.append(i)
print(dep_fields)
prod = 1
for i in dep_fields:
prod *= self.your_ticket[i]
print(prod)
p = Part1()
p.run()
p.run2()
|
the-stack_106_17804
|
# Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of fine-tuning OpenAI GPT-2 language model.
"""
import argparse
import importlib
import os
from typing import Any
import torch
import texar.torch as tx
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint', type=str, default=None,
help="Model checkpoint to load model weights from.")
parser.add_argument(
"--pretrained-model-name", type=str, default="gpt2-small",
choices=tx.modules.GPT2Decoder.available_checkpoints(),
help="Name of the pre-trained checkpoint to load.")
parser.add_argument(
'--config-train', type=str, default="config_train",
help="Configurations of GPT-2 training, including data and "
"optimization hyperparameters.")
parser.add_argument(
"--output-dir", default="output/",
help="The output directory where the model checkpoints will be written.")
parser.add_argument(
'--temperature', type=float, default=0.7,
help="Softmax temperature for top-k sample decoding. Must be strictly "
"greater than 0. Defaults to 0.7.")
parser.add_argument(
'--top-k', type=int, default=40,
help="The number of top most likely candidates from a vocab distribution.")
parser.add_argument(
'--top-p', type=float, default=None,
help="Select tokens with cumulative probability of at most 'p' when "
"arranged in decreasing order. This will use "
"TopPSampleEmbeddingHelper for decoding.")
parser.add_argument(
"--do-train", action="store_true", help="Whether to run training.")
parser.add_argument(
"--do-eval", action="store_true",
help="Whether to run eval on the dev set.")
parser.add_argument(
"--do-test", action="store_true",
help="Whether to run test on the test set.")
args = parser.parse_args()
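# Example invocation (illustrative; the script filename and the config module
# name are assumptions, and config_train.py must define the referenced settings):
#   python gpt2_train_main.py --do-train --do-eval \
#       --pretrained-model-name=gpt2-small --output-dir=output/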
config_train: Any = importlib.import_module(args.config_train)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main() -> None:
"""
Builds the model and runs.
"""
tx.utils.maybe_create_dir(args.output_dir)
max_decoding_length = config_train.max_decoding_length
# Build the GPT-2 model
model = tx.modules.GPT2Decoder(args.pretrained_model_name)
if args.checkpoint:
ckpt = torch.load(args.checkpoint)
model.load_state_dict(ckpt['model'])
model.to(device)
if max_decoding_length > model.hparams.position_size:
raise ValueError(
"max_decoding_length should not be greater than position size")
# Create a GPT-2 tokenizer (BPE encoding)
tokenizer = tx.data.GPT2Tokenizer(
pretrained_model_name=args.pretrained_model_name)
# Loads data
datasets = {}
if args.do_train:
train_dataset = tx.data.RecordData(
hparams=config_train.train_hparam, device=device)
datasets['train'] = train_dataset
if args.do_eval:
eval_dataset = tx.data.RecordData(
hparams=config_train.eval_hparam, device=device)
datasets['eval'] = eval_dataset
if args.do_test:
test_dataset = tx.data.RecordData(
hparams=config_train.test_hparam, device=device)
datasets['test'] = test_dataset
iterator = tx.data.DataIterator(datasets)
# For training
train_op = tx.core.get_train_op(
params=model.parameters(), hparams=config_train.opt)
end_token = tokenizer.map_token_to_id('<|endoftext|>')
def _get_helper(start_tokens):
if args.top_p:
helper = tx.modules.TopPSampleEmbeddingHelper(
start_tokens=start_tokens,
end_token=end_token,
p=args.top_p,
softmax_temperature=args.temperature)
else:
helper = tx.modules.TopKSampleEmbeddingHelper(
start_tokens=start_tokens,
end_token=end_token,
top_k=args.top_k,
softmax_temperature=args.temperature)
return helper
dis_steps = config_train.display_steps
eval_steps = config_train.eval_steps
eval_best = {"loss": 1e8, "ppl": 1e8}
def _train_epoch():
r"""Trains on the training set, and evaluates on the dev set
periodically.
"""
iterator.switch_to_dataset("train")
model.train()
step = 0
for batch in iterator:
input_ids = batch["text_ids"]
outputs = model(inputs=input_ids, decoding_strategy='train_greedy')
loss = tx.losses.sequence_sparse_softmax_cross_entropy(
labels=batch['text_ids'][:, 1:],
logits=outputs.logits[:, :-1, :],
sequence_length=batch['length'] - 1,
average_across_timesteps=True,
sum_over_timesteps=False)
loss.backward()
train_op()
if dis_steps > 0 and step % dis_steps == 0:
print("step={}, loss={:.4f}".format(step, loss))
if eval_steps > 0 and step % eval_steps == 0:
_eval_epoch()
step += 1
@torch.no_grad()
def _eval_epoch():
r"""Evaluates on the dev set.
"""
iterator.switch_to_dataset("eval")
model.eval()
nsamples = 0
avg_rec = tx.utils.AverageRecorder()
for batch in iterator:
input_ids = batch["text_ids"]
outputs = model(inputs=input_ids)
loss = tx.losses.sequence_sparse_softmax_cross_entropy(
labels=batch['text_ids'][:, 1:],
logits=outputs.logits[:, :-1, :],
sequence_length=batch['length'] - 1,
average_across_timesteps=True,
sum_over_timesteps=False)
ppl = torch.exp(loss)
batch_size = input_ids.size()[0]
avg_rec.add([loss, ppl], batch_size)
nsamples += batch_size
print("eval loss: {:.4f}; ppl: {:.4f}; "
"nsamples: {:d}".format(avg_rec.avg(0), avg_rec.avg(1), nsamples))
if args.do_train and avg_rec.avg(0) < eval_best["loss"]:
eval_best["loss"] = avg_rec.avg(0)
eval_best["ppl"] = avg_rec.avg(1)
ckpt_fn = os.path.join(args.output_dir, 'model_best.ckpt')
torch.save(model.state_dict(), ckpt_fn)
print("Checkpoint best to {}".format(ckpt_fn))
@torch.no_grad()
def _test_epoch():
r"""Generates samples on the test set.
"""
iterator.switch_to_dataset("test")
model.eval()
_all_inputs = []
_all_samples = []
for batch in iterator:
input_ids = batch["text_ids"]
length = batch["length"]
start_tokens = input_ids[:, 0]
helper = _get_helper(start_tokens)
output, _ = model(
context=input_ids,
context_sequence_length=length,
max_decoding_length=max_decoding_length,
helper=helper)
sample_id = output.sample_id
_inputs = []
for i, l in zip(input_ids, length):
# Delete padding
_inputs.append(i[:l].tolist())
_all_inputs.extend(_inputs)
_samples = []
for s, l in zip(sample_id, length):
# Delete inputs from samples
_samples.append(s[l:].tolist())
_all_samples.extend(_samples)
# Parse samples and write to file
eos_token_id = tokenizer.map_token_to_id('<|endoftext|>')
_all_input_text = []
for i in _all_inputs:
if i[0] == eos_token_id:
# '<|endoftext|>' is used as the BOS token. Delete it here
i = i[1:]
i_text = tokenizer.map_id_to_text(i)
_all_input_text.append(i_text)
# '<|endoftext|>' is used as the PAD token. Delete them here
_all_input_text = tx.utils.strip_eos(_all_input_text,
eos_token='<|endoftext|>')
_all_samples_text = []
for i, s in zip(_all_inputs, _all_samples):
s_text = tokenizer.map_id_to_text(s)
s_text = s_text.replace('\n', ' ')
_all_samples_text.append(s_text)
_all_samples_text = tx.utils.strip_eos(_all_samples_text,
eos_token='<|endoftext|>')
output_file = os.path.join(args.output_dir, "test_samples.tsv")
print('Write samples to {}'.format(output_file))
tx.utils.write_paired_text(
_all_input_text, _all_samples_text, output_file)
if args.do_train:
for _ in range(config_train.max_train_epoch):
_train_epoch()
torch.save(model.state_dict(),
os.path.join(args.output_dir, 'model.ckpt'))
if args.do_eval:
_eval_epoch()
if args.do_test:
_test_epoch()
if __name__ == "__main__":
main()
|
the-stack_106_17806
|
import logging
import time
import random
import numpy as np
from smac.configspace import impute_inactive_values, get_one_exchange_neighbourhood, Configuration
__author__ = "Aaron Klein, Marius Lindauer"
__copyright__ = "Copyright 2015, ML4AAD"
__license__ = "3-clause BSD"
__maintainer__ = "Aaron Klein"
__email__ = "[email protected]"
__version__ = "0.0.1"
class LocalSearch(object):
def __init__(self, acquisition_function, config_space,
epsilon=0.00001, max_iterations=None, rng=None):
"""
Implementation of SMAC's local search
Parameters:
----------
acquisition_function: function
The function which the local search tries to maximize
config_space: ConfigSpace
Parameter configuration space
epsilon: float
In order to perform a local move one of the incumbent's neighbors
needs at least an improvement higher than epsilon
max_iterations: int
Maximum number of iterations that the local search will perform
"""
self.config_space = config_space
self.acquisition_function = acquisition_function
self.epsilon = epsilon
self.max_iterations = max_iterations
if rng is None:
self.rng = np.random.RandomState(seed=np.random.randint(10000))
else:
self.rng = rng
self.logger = logging.getLogger("localsearch")
def maximize(self, start_point, *args):
"""
Starts a local search from the given startpoint and quits
if either the max number of steps is reached or no neighbor
with an higher improvement was found.
Parameters:
----------
start_point: np.array(1, D):
The point from where the local search starts
*args :
Additional parameters that will be passed to the
acquisition function
Returns:
-------
incumbent np.array(1, D):
The best found configuration
acq_val_incumbent np.array(1,1) :
The acquisition value of the incumbent
"""
incumbent = start_point
# Compute the acquisition value of the incumbent
incumbent_ = impute_inactive_values(incumbent)
acq_val_incumbent = self.acquisition_function(
incumbent_.get_array(),
*args)
local_search_steps = 0
neighbors_looked_at = 0
time_n = []
while True:
local_search_steps += 1
if local_search_steps % 1000 == 0:
self.logger.warn("Local search took already %d iterations." \
"Is it maybe stuck in a infinite loop?", local_search_steps)
# Get neighborhood of the current incumbent
# by randomly drawing configurations
changed_inc = False
all_neighbors = get_one_exchange_neighbourhood(incumbent,
seed=self.rng.seed())
self.rng.shuffle(all_neighbors)
for neighbor in all_neighbors:
s_time = time.time()
neighbor_ = impute_inactive_values(neighbor)
n_array = neighbor_.get_array()
acq_val = self.acquisition_function(n_array, *args)
neighbors_looked_at += 1
time_n.append(time.time() - s_time)
if acq_val > acq_val_incumbent + self.epsilon:
self.logger.debug("Switch to one of the neighbors")
incumbent = neighbor
acq_val_incumbent = acq_val
changed_inc = True
break
if (not changed_inc) or (self.max_iterations is not None
and local_search_steps == self.max_iterations):
self.logger.debug("Local search took %d steps and looked at %d configurations. "
"Computing the acquisition value for one "
"configuration took %f seconds on average.",
local_search_steps, neighbors_looked_at, np.mean(time_n))
break
return incumbent, acq_val_incumbent
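# Illustrative usage sketch (hypothetical names; assumes an acquisition function
# and a ConfigSpace instance are already available):
#   local_search = LocalSearch(acquisition_function=ei, config_space=cs)
#   incumbent, acq_value = local_search.maximize(cs.sample_configuration())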
|
the-stack_106_17807
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
"""
Usage:
def options(opt):
opt.load('android_studio')
$ waf android_studio
"""
import os, sys, shutil, random, time, re, stat, string
import xml.etree.ElementTree as ET
from collections import defaultdict
from pprint import pprint, pformat
from string import maketrans
from StringIO import StringIO
from waflib import Node, Context, TaskGen, Build, Utils, Logs
from waflib.Configure import conf
from waflib.TaskGen import feature, after_method
from waf_branch_spec import CONFIGURATIONS
################################################################
# Defaults #
PROJECT_BUILD_TEMPLATE = r'''
apply from: "waf.gradle"
buildscript {
repositories {
jcenter()
}
dependencies {
classpath "com.android.tools.build:gradle-experimental:0.10.0-alpha4"
// NOTE: Do not place your application dependencies here: they belong
// in the individual module build.gradle files
}
}
allprojects { project ->
buildDir "${binTempRoot}/${rootProject.name}/${project.name}"
repositories {
jcenter()
}
}
'''
WAF_GRADLE_TASK = r'''
import org.apache.tools.ant.taskdefs.condition.Os
// the WAF gradle task wrapper
class WafTask extends Exec {
WafTask() {
// set the working directory for the waf command
// default to be X levels up for each of the game projects
workingDir "${project.ext.engineRoot}"
// base commandline tool
if (Os.isFamily(Os.FAMILY_WINDOWS))
{
commandLine "cmd", "/c", "lmbr_waf.bat", "--from-android-studio"
}
else
{
commandLine "./lmbr_waf.sh", "--from-android-studio"
}
}
}
// export these closures as common waf properties
project.ext.WafTask = WafTask
'''
TASK_GEN_HEADER = r'''
// generate all the build tasks
afterEvaluate {
// disable the built in native tasks
tasks.whenTaskAdded { task ->
if (task.name.contains("Shared") || task.name.contains("Static"))
{
task.deleteAllActions()
}
}
// add the waf build tasks to the build chain
project.ext.platforms.each { platform ->
project.ext.configurations.each { config ->
String targetName = "${platform.capitalize()}${config.capitalize()}"
// create the custom waf task
String wafTaskName = "build${targetName}Waf"
String commandArgs = "build_${platform}_${config} -p all %s"
tasks.create(name: wafTaskName, type: WafTask, description: "lmbr_waf ${commandArgs}") {
doFirst {
args commandArgs.split(" ")
}
}
// add the waf task to the build chain
String compileJavaTask = "compile${targetName}Sources"
tasks.whenTaskAdded { task ->
if (task.name == compileJavaTask) {
task.dependsOn wafTaskName
}
}
'''
COPY_TASK_GEN = r'''
// generate the copy apk task
String copyTaskName = "copy${platform.capitalize()}${config.capitalize()}Apk"
tasks.create(name: copyTaskName, type: Copy){
from "${buildDir}/outputs/apk"
into "${engineRoot}/${androidBinMap[platform][config]}"
include "${project.name}-${platform}-${config}.apk"
rename { String fileName ->
fileName.replace("-${platform}-${config}", "")
}
}
// add the copy apk task to the build chain
String assembleTask = "assemble${targetName}"
tasks.whenTaskAdded { task ->
if (task.name == assembleTask) {
task.finalizedBy copyTaskName
}
}
'''
TASK_GEN_FOOTER = r'''
}
}
}
'''
GRADLE_PROPERTIES = r'''
################################################################
# This file was automatically created by WAF
# WARNING! All modifications will be lost!
################################################################
# Android Studio project settings overrides
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Enable Gradle as a daemon to improve the startup and execution time
org.gradle.daemon=true
# Since Lumberyard is a large project, we need to override the JVM daemon process memory settings
# Defaults -Xmx10248m -XX:MaxPermSize=256m
org.gradle.jvmargs=-Xmx2048m -XX:MaxPermSize=512m
# Enable the new (incubating) selective Gradle configure mode. This should help improve
# build time due to the large size of Lumberyard.
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:configuration_on_demand
org.gradle.configureondemand=true
'''
# Defaults END #
################################################################
################################################################
def debug_log_value(key_word, value):
Logs.debug('android_studio: -- %32s: %s', key_word, value)
################################################################
def inject_auto_gen_header(writer):
writer('////////////////////////////////////////////////////////////////\n')
writer('// This file was automatically created by WAF\n')
writer('// WARNING! All modifications will be lost!\n')
writer('////////////////////////////////////////////////////////////////\n')
################################################################
def find_common_path(paths_list):
root_path = os.path.commonprefix(paths_list)
if not os.path.exists(root_path) or not os.path.isdir(root_path):
root_path = root_path[:root_path.rindex(os.sep)]
return root_path
################################################################
def get_android_sdk_version_number(conf):
sdk_version = conf.get_android_sdk_version()
version_tokens = sdk_version.split('-')
if version_tokens[1].isdigit():
return int(version_tokens[1])
else:
return 19 # fall back to the lowest version of android we support
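# e.g. a hypothetical sdk version string 'android-26' yields 26, while anything
# non-numeric after the dash falls back to the minimum supported API level of 19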
################################################################
def to_list(value):
if isinstance(value, set):
return list(value)
elif isinstance(value, list):
return value[:]
else:
return [value]
################################################################
def convert_to_gradle_name(string):
tokens = string.split('_')
output = ""
for index, value in enumerate(tokens):
if index == 0:
output = value
else:
output += value.title()
return output
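# Illustrative conversions (examples assumed, not from the original source):
#   convert_to_gradle_name('compile_sdk_version') -> 'compileSdkVersion'
#   convert_to_gradle_name('abi_filters')         -> 'abiFilters'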
################################################################
def indent_text(text, indent, stream):
indent_space = ' ' * indent * 4
result = ''
for line in text.splitlines():
result += "%s%s\n" % (indent_space, line)
stream.write(result)
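# Illustrative (assumed stream object): indent_text('foo {', 2, stream) writes
# '        foo {\n', i.e. two 4-space indent levels before the text.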
################################################################
class GradleList(list):
"""
    Wrapped list used to signal when special Gradle list formatting should be applied
"""
def __init__(self, *k):
for elem in k:
self.append(elem)
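# Note: attributes holding a GradleList are emitted by GradleNode.write_internal_dict
# below as '<name>.add(...)' (single element) or '<name>.addAll(...)' (multiple),
# while plain Python lists are emitted as one '<name> <value>' line per element.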
################################################################
class GradleNode(object):
################################
def to_string(self, obj):
if isinstance(obj, bool):
return str(obj).lower()
elif isinstance(obj, str):
            # convert backslashes to forward slashes; on Windows a backslash would
            # need to be escaped (doubled) to print correctly into the Gradle file
return '"%s"' % obj.replace('\\', '/')
elif isinstance(obj, GradleList):
if len(obj) == 1:
return self.to_string(obj[0])
else:
return pformat(obj).replace("'", '"')
else:
return str(obj)
################################
def write_value(self, obj, stream, indent = 0):
next_indent = indent + 1
if isinstance(obj, GradleNode):
obj.write(stream, indent)
elif isinstance(obj, dict):
for key, value in obj.items():
indent_text('%s %s' % (convert_to_gradle_name(key), self.to_string(value)), next_indent, stream)
################################
def write_internal_dict(self, stream, indent = 0):
next_indent = indent + 1
hidden_attrib_prefix = '_%s_' % self.__class__.__name__
for attribute, value in self.__dict__.items():
if not value or attribute.startswith(hidden_attrib_prefix):
continue
elif isinstance(value, GradleNode):
value.write(stream, next_indent)
elif isinstance(value, dict):
for key, subvalue in value.items():
if not subvalue:
continue
if hasattr(subvalue, 'gradle_name'):
self.write_value(subvalue, stream, next_indent)
else:
indent_text('%s {' % key, next_indent, stream)
self.write_value(subvalue, stream, next_indent)
indent_text('}', next_indent, stream)
elif isinstance(value, GradleList):
format = '%s.add(%s)' if len(value) == 1 else '%s.addAll(%s)'
indent_text(format % (convert_to_gradle_name(attribute), self.to_string(value)), next_indent, stream)
elif isinstance(value, list):
for elem in value:
indent_text('%s %s' % (convert_to_gradle_name(attribute), self.to_string(elem)), next_indent, stream)
else:
indent_text('%s %s' % (convert_to_gradle_name(attribute), self.to_string(value)), next_indent, stream)
################################
def write(self, stream, indent = 0):
if hasattr(self.__class__, 'gradle_name'):
indent_text('%s {' % self.__class__.gradle_name, indent, stream)
self.write_internal_dict(stream, indent)
indent_text('}', indent, stream)
else:
self.write_internal_dict(stream, indent)
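# Illustrative output sketch (values assumed): writing an NdkProperties node with
# module_name = 'MyGameLauncher' and abi_filters = GradleList('armeabi-v7a')
# produces roughly:
#   ndk {
#       moduleName "MyGameLauncher"
#       abiFilters.add("armeabi-v7a")
#   }
# (attribute order may vary since write_internal_dict iterates __dict__).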
################################################################
class DefaultConfig(GradleNode):
gradle_name = 'defaultConfig'
################################
class ApiLevel(GradleNode):
################################
def __init__(self, api_level = 0):
self.api_level = api_level
################################
def __nonzero__(self):
return self.api_level > 0
################################
def __init__(self):
self.application_id = ''
self.sdk_versions = defaultdict(DefaultConfig.ApiLevel)
################################
def __nonzero__(self):
if self.application_id or self.sdk_versions:
return True
else:
return False
################################
def set_properties(self, **props):
if 'application_id' in props:
self.application_id = props['application_id']
if 'min_sdk' in props:
self.sdk_versions['minSdkVersion'].api_level = props['min_sdk']
if 'target_sdk' in props:
self.sdk_versions['targetSdkVersion'].api_level = props['target_sdk']
################################################################
class SigningConfigRef:
"""
    Wrapper class to provide custom formatting
"""
################################
def __init__(self, config_name = ''):
self.config_name = config_name
################################
def __nonzero__(self):
return True if self.config_name else False
################################
def __str__(self):
return '= $("android.signingConfigs.%s")' % self.config_name
################################################################
class SigningConfigs(GradleNode):
"""
This has to be outside the android block in the model definition in the experimental version
of gradle we are using. However, it is still part of the android properties.
"""
gradle_name = 'android.signingConfigs'
################################
class Config(GradleNode):
################################
def __init__(self):
self.key_alias = ''
self.key_password = ''
self.store_file = ''
self.store_password = ''
################################
def __nonzero__(self):
if self.key_alias and self.key_password and self.store_file and self.store_password:
return True
else:
return False
################################
def __init__(self):
self.configs = defaultdict(SigningConfigs.Config)
################################
def __nonzero__(self):
        for value in self.configs.values():
            if value:
                return True
        return False
################################
def add_signing_config(self, config_name, **config_props):
config = SigningConfigs.Config()
for attribute in config.__dict__.keys():
config.__dict__[attribute] = config_props.get(attribute, '')
config_name = 'create("%s")' % config_name
self.configs[config_name] = config
################################################################
class NdkProperties(GradleNode):
gradle_name = 'ndk'
################################
def __init__(self):
self.module_name = ""
self.debuggable = False
self.abi_filters = GradleList()
self._c_flags = GradleList()
self.cpp_flags = GradleList()
################################
def __nonzero__(self):
if self.module_name or self.debuggable or self.abi_filters or self._c_flags or self.cpp_flags:
return True
else:
return False
################################
def set_module_name(self, module_name):
self.module_name = module_name
################################
def set_debuggable(self, debuggable):
self.debuggable = debuggable
################################
def add_abi_filters(self, *abi_filters):
self.abi_filters.extend(abi_filters)
################################
def add_general_compiler_flags(self, *flags):
self._c_flags.extend(flags)
self.cpp_flags.extend(flags)
################################################################
class Sources(GradleNode):
gradle_name = 'sources'
################################
class Paths(GradleNode):
################################
def __init__(self):
self.src_dir = []
self.src_file = []
self.excludes = GradleList()
################################
def __nonzero__(self):
if self.src_dir or self.src_file or self.excludes:
return True
else:
return False
################################
def add_src_paths(self, *paths):
self.src_dir.extend(paths)
################################
def add_src_files(self, *files):
self.src_file.extend(files)
################################
def add_excludes(self, *excludes):
self.excludes.extend(excludes)
################################
class Dependencies(GradleNode):
gradle_name = 'dependencies'
################################
def __init__(self):
self.project = []
################################
def __nonzero__(self):
return True if self.project else False
################################
def add_projects(self, *projects):
deps = [':%s' % proj if proj[0] != ':' else proj for proj in projects]
self.project.extend(deps)
################################
def clear_projects(self, *projects):
del self.project[:]
################################
class Set(GradleNode):
################################
def __init__(self):
self.paths = defaultdict(Sources.Paths)
self.dependencies = Sources.Dependencies()
################################
def __nonzero__(self):
if self.paths:
for key, value in self.paths.items():
if value:
return True
if self.dependencies:
return True
else:
return False
################################
def add_export_paths(self, *paths):
self.paths['exportedHeaders'].add_src_paths(*paths)
################################
def add_src_paths(self, *paths):
self.paths['source'].add_src_paths(*paths)
################################
def add_project_dependencies(self, *projects):
self.dependencies.add_projects(*projects)
################################
def clear_project_dependencies(self):
self.dependencies.clear_projects()
################################
class SourceTypes(GradleNode):
################################
def __init__(self):
self.sets = defaultdict(Sources.Set)
################################
def __nonzero__(self):
for value in self.sets.values():
if value:
return True
return False
################################
def set_java_properties(self, **props):
if 'java_src' in props:
java_props = self.sets['java']
src = to_list(props['java_src'])
java_props.add_src_paths(*src)
if 'aidl_src' in props:
aidl_props = self.sets['aidl']
aidl_src = to_list(props['aidl_src'])
aidl_props.add_src_paths(*aidl_src)
if 'res_src' in props:
android_resources = self.sets['res']
res = to_list(props['res_src'])
android_resources.add_src_paths(*res)
if 'manifest_path' in props:
android_manifest = self.sets['manifest']
manifest = to_list(props['manifest_path'])
android_manifest.add_src_paths(*manifest)
################################
def get_jni_dependencies(self):
jni_props = self.sets['jni']
return jni_props.dependencies.project
################################
def set_jni_dependencies(self, dependencies):
jni_props = self.sets['jni']
jni_props.clear_project_dependencies()
jni_props.add_project_dependencies(*dependencies)
################################
def set_jni_properties(self, **props):
jni_props = self.sets['jni']
if 'jni_exports' in props:
exports = to_list(props['jni_exports'])
jni_props.add_export_paths(*exports)
if 'jni_src' in props:
src = to_list(props['jni_src'])
jni_props.add_src_paths(*src)
if 'jni_dependencies' in props:
deps = to_list(props['jni_dependencies'])
jni_props.add_project_dependencies(*deps)
################################
def __init__(self):
self.variants = defaultdict(Sources.SourceTypes)
################################
def __nonzero__(self):
for value in self.variants.values():
if value:
return True
return False
################################
def set_main_properties(self, **props):
self.set_variant_properties('main', **props)
################################
def set_variant_properties(self, variant_name, **props):
variant = self.variants[variant_name]
variant.set_java_properties(**props)
variant.set_jni_properties(**props)
################################
def validate_and_set_main_dependencies(self, *dependencies):
self.validate_and_set_variant_dependencies('main', *dependencies)
################################
def validate_and_set_variant_dependencies(self, variant_name, dependencies):
variant = self.variants[variant_name]
current_deps = variant.get_jni_dependencies()
valid_deps = [project for project in current_deps if project[1:] in dependencies]
variant.set_jni_dependencies(valid_deps)
################################################################
class Builds(GradleNode):
gradle_name = 'buildTypes'
################################################################
class Type(GradleNode):
################################
def __init__(self):
self.ndk = NdkProperties()
self.debuggable = False
self.signing_config = SigningConfigRef()
################################
def set_debuggable(self, debuggable):
self.debuggable = debuggable
self.ndk.set_debuggable(debuggable)
################################
def add_ndk_compiler_flags(self, *flags):
self.ndk.add_general_compiler_flags(*flags)
################################
def __init__(self):
self.types = defaultdict(Builds.Type)
################################
def __nonzero__(self):
return True if self.types else False
################################
def add_build_type(self, build_name, **build_props):
if build_name not in ['debug', 'release']:
build_name = 'create("%s")' % build_name
debuggable = False
if 'debug' in build_name:
debuggable = True
build_type = self.types[build_name]
build_type.set_debuggable(build_props.get('debuggable', debuggable))
if 'signing_config_ref' in build_props:
build_type.signing_config = SigningConfigRef(build_props['signing_config_ref'])
if 'ndk_flags' in build_props:
flags = to_list(build_props['ndk_flags'])
build_type.add_ndk_compiler_flags(*flags)
################################################################
class Products(GradleNode):
gradle_name = 'productFlavors'
################################################################
class Flavor(GradleNode):
################################
def __init__(self):
self.ndk = NdkProperties()
################################
def add_abi_filters(self, *abis):
self.ndk.add_abi_filters(*abis)
################################
def add_ndk_compiler_flags(self, *flags):
self.ndk.add_general_compiler_flags(*flags)
################################
def __init__(self):
self.flavors = defaultdict(Products.Flavor)
################################
def __nonzero__(self):
return True if self.flavors else False
################################
def add_product_flavor(self, flavor_name, **flavor_props):
key_name = 'create("%s")' % flavor_name
product_flavor = self.flavors[key_name]
if 'abis' in flavor_props:
abis = to_list(flavor_props['abis'])
product_flavor.add_abi_filters(*abis)
if 'ndk_flags' in flavor_props:
flags = to_list(flavor_props['ndk_flags'])
product_flavor.add_ndk_compiler_flags(*flags)
################################################################
class Android(GradleNode):
gradle_name = 'android'
################################
def __init__(self):
self.compile_sdk_version = 19 # default value is lowest version we support
self.build_tools_version = ''
self.default_config = DefaultConfig()
self.ndk = NdkProperties()
self.build_types = Builds()
self.product_flavors = Products()
self.sources = Sources()
################################
def set_general_properties(self, **props):
if 'module_name' in props:
self.ndk.set_module_name(props['module_name'])
if 'sdk_version' in props:
self.compile_sdk_version = props['sdk_version']
if 'build_tools_version' in props:
self.build_tools_version = props['build_tools_version']
self.default_config.set_properties(**props)
################################
def set_ndk_properties(self, **props):
if 'module_name' in props:
self.ndk.set_module_name(props['module_name'])
if 'ndk_flags' in props:
flags = to_list(props['ndk_flags'])
self.ndk.add_general_compiler_flags(*flags)
################################
def add_build_type(self, build_name, **props):
self.build_types.add_build_type(build_name, **props)
################################
def add_product_flavor(self, flavor_name, **props):
self.product_flavors.add_product_flavor(flavor_name, **props)
################################
def set_main_source_paths(self, **paths):
self.sources.set_main_properties(**paths)
################################
def validate_and_set_main_dependencies(self, dependencies):
self.sources.validate_and_set_main_dependencies(dependencies)
################################################################
class ModelType:
Native_lib, Application, Android_lib = range(3)
################################################################
class Model(GradleNode):
gradle_name = 'model'
################################
def __init__(self, parent):
self.android = Android()
self.signing_configs = SigningConfigs()
self.__parent = parent
################################
def apply_platform_configs(self, ctx, platform_abis, platform_defines, configs):
if self.__parent.type == ModelType.Android_lib:
return
android = self.android
if self.__parent.type == ModelType.Application:
signing_props = {
'key_alias' : ctx.get_android_dev_keystore_alias(),
'store_file' : ctx.get_android_dev_keystore_path(),
'key_password' : ctx.options.dev_key_pass,
'store_password' : ctx.options.dev_store_pass
}
self.signing_configs.add_signing_config('Development', **signing_props)
for platform, abi in platform_abis.items():
common_platform_defines = platform_defines[platform]
for config in configs:
env_name = '%s_%s' % (platform, config)
env = ctx.all_envs[env_name]
build_props = {}
all_config_defines = env['DEFINES']
filtered_defines = [define for define in all_config_defines if '"' not in define and define not in common_platform_defines]
# add the defines so the symbol resolution can happen in the editor
build_props['ndk_flags'] = ['-D%s' % define for define in filtered_defines]
if self.__parent.type == ModelType.Application:
build_props['signing_config_ref'] = 'Development'
android.add_build_type(config, **build_props)
flags = ['-D%s' % define for define in common_platform_defines if '"' not in define]
android.add_product_flavor(platform, abis = abi, ndk_flags = flags)
################################
def process_target(self, proj_props, target_name, task_generator):
android = self.android
ctx = proj_props.ctx
package_name = ''
if self.__parent.type == ModelType.Application:
game_project = getattr(task_generator, 'project_name', '')
package_name = ctx.get_android_package_name(game_project)
target_sdk = getattr(task_generator, 'sdk_version', ctx.env['ANDROID_SDK_VERSION_NUMBER'])
android.set_general_properties(
sdk_version = target_sdk,
build_tools_version = ctx.get_android_build_tools_version(),
application_id = package_name,
min_sdk = getattr(task_generator, 'min_sdk', ctx.env['ANDROID_NDK_PLATFORM_NUMBER']),
target_sdk = target_sdk
)
def _defines_to_flags(defines_list):
return ['-D%s' % define for define in defines_list if '"' not in define]
if not self.__parent.type == ModelType.Android_lib:
# process the task specific defines
common_defines = ctx.collect_task_gen_attrib(task_generator, 'defines')
debug_log_value('Common Defines', common_defines)
flags = _defines_to_flags(common_defines)
android.set_ndk_properties(
module_name = target_name,
ndk_flags = flags
)
for config in proj_props.configs:
config_defines = ctx.collect_task_gen_attrib(task_generator, '%s_defines' % config)
debug_log_value('{} Defines'.format(config.title()), config_defines)
if config_defines:
config_flags = _defines_to_flags(config_defines)
android.add_build_type(config, ndk_flags = config_flags)
for target in proj_props.target_abis.keys():
target_defines = ctx.collect_task_gen_attrib(task_generator, '%s_defines' % target)
debug_log_value('{} Defines'.format(target.title()), target_defines)
if target_defines:
target_flags = _defines_to_flags(target_defines)
android.add_product_flavor(target, ndk_flags = target_flags)
# process the source paths and native dependencies
jni_src_paths = ctx.extrapolate_src_paths(task_generator)
export_includes = ctx.collect_task_gen_attrib(task_generator, 'export_includes')
jni_export_paths = []
for exp_incl in export_includes:
real_path = exp_incl
if isinstance(exp_incl, Node.Node):
real_path = exp_incl.abspath()
elif not os.path.isabs(real_path):
relative_path = os.path.join(task_generator.path.abspath(), exp_incl)
real_path = os.path.normpath(relative_path)
# having anything reference the root SDKs or Tools directory
# will cause Android Studio to crash... :(
if os.path.basename(real_path) not in ['SDKs', 'Tools']:
jni_export_paths.append(real_path)
if export_includes and not jni_export_paths:
jni_export_paths = jni_src_paths
# android specific path handling
java_src_paths = []
aidl_src_paths = []
manifest_path = []
resource_src_path = []
# native only modules can't have any java / manifest / resources, so only gather them for
# applications and android libs
if self.__parent.type in (ModelType.Application, ModelType.Android_lib):
java_src_paths = ctx.collect_task_gen_attrib(task_generator, 'android_java_src_path')
aidl_src_paths = ctx.collect_task_gen_attrib(task_generator, 'android_aidl_src_path')
manifest_path = ctx.collect_task_gen_attrib(task_generator, 'android_manifest_path')
resource_src_path = ctx.collect_task_gen_attrib(task_generator, 'android_res_path')
        # if we are an application, pull in all the java files from dependent modules
        # so they get built correctly by Android Studio
if self.__parent.type == ModelType.Application:
game_project = getattr(task_generator, 'project_name', None)
for tsk_gen in ctx.project_tasks:
module_name = tsk_gen.target
# skip the launchers / same module, those source paths were already added above
if module_name.endswith('AndroidLauncher'):
continue
if ctx.is_module_for_game_project(module_name, game_project, None):
java_src_paths += ctx.collect_task_gen_attrib(tsk_gen, 'android_java_src_path')
aidl_src_paths += ctx.collect_task_gen_attrib(tsk_gen, 'android_aidl_src_path')
debug_log_value('Source Paths (java)', java_src_paths)
debug_log_value('Source Paths (aidl)', aidl_src_paths)
        # for some reason get_module_uses doesn't work correctly on the
        # launcher tasks, so we need to manually create the use dependency tree
all_native_uses = []
if self.__parent.type == ModelType.Application:
local_uses = ctx.collect_task_gen_attrib(task_generator, 'use')
all_native_uses = local_uses[:]
for use in local_uses:
all_native_uses.extend(ctx.get_module_uses(use, proj_props.project_spec))
elif self.__parent.type == ModelType.Native_lib:
all_native_uses = ctx.get_module_uses(target_name, proj_props.project_spec)
project_names = [ tgen.target for tgen in ctx.project_tasks ]
valid_deps = set(project_names).intersection(all_native_uses) # [project for project in current_deps if project[1:] in dependencies]
debug_log_value('Uses (native)', list(valid_deps))
android.set_main_source_paths(
java_src = java_src_paths,
aidl_src = aidl_src_paths,
jni_src = jni_src_paths,
jni_exports = jni_export_paths,
jni_dependencies = valid_deps,
manifest_path = manifest_path,
res_src = resource_src_path
)
################################
def validate_and_set_project_dependencies(self, project_modules):
self.android.validate_and_set_main_dependencies(project_modules)
################################
class GradleContainer(GradleNode):
################################
def __init__(self):
self.nodes = {}
################################
def __getattr__(self, name):
return self.nodes[name]
################################
def write(self, stream, indent = 0):
return super(GradleContainer, self).write(stream, indent - 1)
################################
class Module(GradleContainer):
'''
Represents a Module in Android Studio
'''
################################
def __init__(self, type):
super(Module, self).__init__()
self.__type = type
################################
def write(self, stream, indent = 0):
indent_text('apply from: "../waf.gradle"', indent, stream)
if self.__type == ModelType.Application:
            plugin_type = 'application'
        elif self.__type == ModelType.Android_lib:
            plugin_type = 'library'
        else:
            plugin_type = 'native'
        indent_text('apply plugin: "com.android.model.%s"\n\n' % plugin_type, indent, stream)
super(Module, self).write(stream, indent)
stream.write('\n')
################################
def process_module_dependencies(self, project, task_generator):
deps = ModuleDependencies()
addNode = False
modules = project.ctx.collect_task_gen_attrib(task_generator, 'modules_dependency')
if modules:
deps.add_compile_projects(*modules)
addNode = True
files = project.ctx.collect_task_gen_attrib(task_generator, 'files_dependency')
if files:
deps.add_compile_files(*files)
addNode = True
# Look for the android specific uses
if self.__type == ModelType.Application:
def _get_task_gen(task_name):
try:
return project.ctx.get_tgen_by_name(task_name)
except:
return None
launcher_name = getattr(task_generator, 'project_name', '')
apk_task_name = '{}_APK'.format(launcher_name)
apk_task = _get_task_gen(apk_task_name)
if apk_task:
uses_added = []
uses = project.ctx.collect_task_gen_attrib(apk_task, 'use')
for use in uses:
use_task_gen = _get_task_gen(use)
if use_task_gen:
use_task_gen.post()
if not hasattr(use_task_gen, 'aar_task'):
continue
android_studio_name = getattr(use_task_gen, 'android_studio_name', None)
if android_studio_name:
deps.add_compile_libraries(android_studio_name)
uses_added.append(android_studio_name)
else:
deps.add_compile_projects(use)
uses_added.append(use)
addNode = True
debug_log_value('Uses (android)', uses_added)
if addNode:
self.addNode(deps, deps.gradle_name)
################################
def addNode(self, node, name):
self.nodes[name] = node
################################
@property
def type(self):
return self.__type
################################
@type.setter
def type(self, value):
self.__type = value
################################
@type.deleter
def type(self):
del self.__type
################################
class ModuleDependencies(GradleNode):
'''
Represents a Module dependency
'''
gradle_name = 'dependencies'
################################
class _Dependency:
'''
Helper class to write the dependency in the proper way.
'''
################################
def __init__(self, name, type = None):
self.name = name
self.type = type
################################
def __str__(self):
if self.type:
return '{}("{}")'.format(self.type, self.name)
else:
return '"{}"'.format(self.name)
################################
def __nonzero__(self):
return any([self.name, self.type])
################################
def __init__(self):
self.compile = []
################################
def __nonzero__(self):
return True if self.compile else False
################################
def clear_compile_projects(self):
del self.compile[:]
################################
def add_compile_projects(self, *projects):
deps = [self._Dependency(':' + proj, 'project') for proj in projects]
self.compile.extend(deps)
################################
def add_compile_libraries(self, *libraries):
deps = [self._Dependency(lib) for lib in libraries]
self.compile.extend(deps)
################################
def add_compile_files(self, *files):
deps = [self._Dependency(file, 'files') for file in files]
self.compile.extend(deps)
################################################################
class AndroidStudioProject:
################################
def __init__(self, ctx):
self.projects = {}
self.configs = []
self.target_abis = {}
self.target_defines = {}
self.project_spec = ''
self.ctx = ctx
################################
def set_project_spec(self, project_spec):
self.project_spec = project_spec
################################
def set_platforms_and_configs(self, ctx, target_abis, configs):
self.configs = configs
self.target_abis = target_abis
# collect all the configuration defines and filter out the
# common ones based on the target platform
for target, abi in target_abis.items():
config_lists = []
for config in configs:
env_name = '%s_%s' % (target, config)
env = ctx.all_envs[env_name]
config_lists.append(env['DEFINES'])
self.target_defines[target] = set(config_lists[0]).intersection(*config_lists[1:])
################################
def add_target_to_project(self, project_name, type, project_task_gen):
Logs.debug('android_studio: Added Module - {} - to project'.format(project_name))
android_module = Module(type)
android_model = Model(android_module)
android_model.apply_platform_configs(self.ctx, self.target_abis, self.target_defines, self.configs)
android_model.process_target(self, project_name, project_task_gen)
android_module.addNode(android_model, android_model.gradle_name)
android_module.process_module_dependencies(self, project_task_gen)
self.projects[project_name] = android_module
################################
def write_project(self, root_node):
# generate the include file for gradle
gradle_settings = root_node.make_node('settings.gradle')
added_targets = sorted(self.projects.keys())
try:
with open(gradle_settings.abspath(), 'w') as settings_file:
inject_auto_gen_header(settings_file.write)
for target in added_targets:
settings_file.write('include ":%s"\n' % target)
except Exception as err:
self.ctx.fatal(str(err))
# generate all the project build.gradle files
for target_name, project in self.projects.items():
project.model.validate_and_set_project_dependencies(added_targets)
module_dir = root_node.make_node(target_name)
module_dir.mkdir()
module_node = module_dir.make_node('build.gradle')
try:
with open(module_node.abspath(), 'w') as build_file:
inject_auto_gen_header(build_file.write)
project.write(build_file)
if not project.type == ModelType.Android_lib:
if project.type == ModelType.Native_lib:
build_file.write(TASK_GEN_HEADER % '--targets=${project.name}')
else:
cmd_arg = '--enabled-game-projects=%s' % target_name.replace('Launcher', '')
build_file.write(TASK_GEN_HEADER % cmd_arg)
build_file.write(COPY_TASK_GEN)
build_file.write(TASK_GEN_FOOTER)
except Exception as err:
self.ctx.fatal(str(err))
################################################################
################################################################
def options(opt):
group = opt.add_option_group('android-studio config')
# disables the apk packaging process so android studio can do it
    group.add_option('--from-android-studio', dest = 'from_android_studio', action = 'store_true', default = False, help = 'INTERNAL USE ONLY for Experimental Android Studio support')
################################################################
class android_studio(Build.BuildContext):
cmd = 'android_studio'
################################
def get_target_platforms(self):
"""
Used in cryengine_modules get_platform_list during project generation
"""
android_targets = [ target for target in self.get_supported_platforms() if 'android' in target ]
return [ 'android' ] + android_targets
################################
def collect_task_gen_attrib(self, task_generator, attribute, *modifiers):
"""
Helper for getting an attribute from the task gen with optional modifiers
"""
result = to_list(getattr(task_generator, attribute, []))
for mod in modifiers:
mod_attrib = '%s_%s' % (mod, attribute)
result.extend(to_list(getattr(task_generator, mod_attrib, [])))
return result
################################
def flatten_tree(self, path_root, obj):
"""
Helper for getting all the leaf nodes in a dictionary
"""
elems = []
if isinstance(obj, dict):
for key, value in obj.items():
elems.extend(self.flatten_tree(path_root, value))
elif isinstance(obj, list):
for value in obj:
elems.extend(self.flatten_tree(path_root, value))
else:
file_path = os.path.join(path_root, obj)
elems.append(os.path.normpath(file_path))
return elems
################################
def collect_source_from_file_list(self, path_node, file_list):
"""
Wrapper for sourcing the file list json data and flattening the
source files into a list containing files with absolute paths
"""
file_node = path_node.find_node(file_list)
src_json = self.parse_json_file(file_node)
return self.flatten_tree(path_node.abspath(), src_json)
################################
def extrapolate_src_paths(self, task_generator):
"""
        By using the file lists independently, along with some additional filtering,
        we can make a fairly accurate guess at the root directories to include in
        the project at the narrowest scope possible. This is important because
        Android Studio blindly includes whole file trees for native source and
        will choke on large directory structures.
"""
file_lists = self.collect_task_gen_attrib(task_generator, 'file_list')
file_lists = set(file_lists) # remove the duplicates, only seems to happen for some modules and only the file_list
exec_path = task_generator.path.abspath()
include_paths = []
for file_list in file_lists:
debug_log_value('File List (native)', file_list)
src_files = self.collect_source_from_file_list(task_generator.path, file_list)
paths_to_add = []
# first process the raw source, no external files
external_filter = ['SDKs', 'Tools']
normal_src = [src_path for src_path in src_files if not any(sub_dir for sub_dir in external_filter if sub_dir in src_path)]
if normal_src:
common_path = find_common_path(normal_src)
debug_log_value('Source Path (native)', common_path)
# attempt to determine which path is the narrower scope
if common_path in exec_path:
paths_to_add.append(exec_path)
else:
paths_to_add.append(common_path)
# next process the files from SDKs to narrow their include directory
sdk_src = [src_path for src_path in src_files if 'SDKs' in src_path]
if sdk_src:
common_path = find_common_path(sdk_src)
paths_to_add.append(common_path)
debug_log_value('SDK Path (native)', common_path)
# same process as the SDKs for the files from Tools
tools_src = [src_path for src_path in src_files if 'Tools' in src_path]
if tools_src:
common_path = find_common_path(tools_src)
paths_to_add.append(common_path)
debug_log_value('Tool Path (native)', common_path)
# filter the possible paths to add again to prevent adding duplicate paths
# or sub directories to paths already added.
for add_path in paths_to_add:
                # This check prevents widening of the source search path for
                # Android Studio. We need the narrowest scope possible to ensure
                # duplicates don't get included across modules and to prevent
                # Android Studio from crashing while trying to parse large
                # directories like Code/SDKs and Code/Tools
if not ( any(incl for incl in include_paths if add_path in incl)
or any(incl for incl in include_paths if incl in add_path)):
include_paths.append(add_path)
debug_log_value('Common Paths (native)', include_paths)
return include_paths
################################
def execute(self):
"""
Entry point of the project generation
"""
# restore the environments
self.restore()
if not self.all_envs:
self.load_envs()
self.load_user_settings()
Logs.info("[WAF] Executing 'android_studio' in '%s'" % (self.variant_dir) )
self.recurse([ self.run_dir ])
# check the apk signing environment
if self.get_android_build_environment() == 'Distribution':
Logs.warn('[WARN] The Distribution build environment is not currently supported in Android Studio, falling back to the Development build environment.')
# create the root project directory
android_root = self.path.make_node(self.get_android_project_relative_path())
if not os.path.exists(android_root.abspath()):
Logs.error('[ERROR] Base android projects not generated. Re-run the configure command.')
return
# generate the root gradle build script
root_build = android_root.make_node('build.gradle')
try:
with open(root_build.abspath(), 'w') as root_build_file:
inject_auto_gen_header(root_build_file.write)
root_build_file.write(PROJECT_BUILD_TEMPLATE)
except Exception as err:
self.fatal(str(err))
# get the core build settings
project = AndroidStudioProject(self)
android_platforms = [platform for platform in self.get_supported_platforms() if 'android' in platform]
android_config_sets = []
for platform in android_platforms:
android_config_sets.append(set(config for config in self.get_supported_configurations(platform) if not config.endswith('_dedicated')))
android_configs = list(set.intersection(*android_config_sets))
android_abis = {}
for android_target in android_platforms:
abi_func = getattr(self, 'get_%s_target_abi' % android_target, None)
if abi_func:
android_abis[android_target] = abi_func()
project.set_platforms_and_configs(self, android_abis, android_configs)
# collect all the modules
modules = []
project_spec = (self.options.project_spec if self.options.project_spec else 'all')
project.set_project_spec(project_spec)
modules = self.spec_modules(project_spec)[:]
for project_name in self.get_enabled_game_project_list():
modules.extend(self.project_and_platform_modules(project_name, android_platforms))
acceptable_platforms = ['android', 'all'] + android_platforms
# first find all the modules that are going to be added to the android studio project
tasks_to_add = []
for group in self.groups:
for task_generator in group:
if not isinstance(task_generator, TaskGen.task_gen):
continue
target_name = task_generator.target
task_platforms = self.get_module_platforms(target_name)
if target_name not in modules:
Logs.debug('android_studio: Skipped Module - %s - is not part of the spec' % target_name)
continue
if not any(target in task_platforms for target in acceptable_platforms):
                    Logs.debug('android_studio: Skipped Module - %s - is not targeted for Android. Targeted for %s' % (target_name, task_platforms))
continue
# remove the non-AndroidLaunchers from the project
if target_name.endswith('Launcher') and not target_name.endswith('AndroidLauncher'):
Logs.debug('android_studio: Skipped Module - %s - is not an AndroidLauncher' % target_name)
continue
# filter out incorrectly configured game projects and their respective launchers
game_project = getattr(task_generator, 'project_name', target_name)
if game_project in self.get_enabled_game_project_list():
if not self.get_android_settings(game_project):
Logs.debug('android_studio: Skipped Module - %s - is a game project not configured for Android' % target_name)
continue
tasks_to_add.append(task_generator)
self.project_tasks = tasks_to_add
# process the modules to be added to the project
for task_generator in tasks_to_add:
target_name = task_generator.target
game_project = getattr(task_generator, 'project_name', target_name)
module_type = ModelType.Native_lib
            if game_project in self.get_enabled_game_project_list() and game_project != target_name:
target_name = self.get_executable_name(game_project)
module_type = ModelType.Application
task_generator.post()
self.process_module(project, target_name, android_root, module_type, task_generator)
project.write_project(android_root)
# generate the gradle waf support file
waf_tools = android_root.make_node('waf.gradle')
try:
with open(waf_tools.abspath(), 'w') as waf_tools_file:
inject_auto_gen_header(waf_tools_file.write)
waf_tools_file.write(WAF_GRADLE_TASK)
indent_text('project.ext { ', 0, waf_tools_file)
indent_text('engineRoot = "${rootDir}/%s/"' % self.path.path_from(android_root).replace('\\', '/'), 1, waf_tools_file)
indent_text('binTempRoot = "${engineRoot}/BinTemp/"', 1, waf_tools_file)
platforms_string = pformat(android_platforms).replace("'", '"')
indent_text('platforms = %s' % platforms_string, 1, waf_tools_file)
configs_string = pformat(android_configs).replace("'", '"')
indent_text('configurations = %s' % configs_string, 1, waf_tools_file)
indent_text('androidBinMap = [', 1, waf_tools_file)
for platform in android_platforms:
indent_text('"%s" : [' % platform, 2, waf_tools_file)
for config in android_configs:
indent_text('"%s" : "%s",' % (config, self.get_output_folders(platform, config)[0]), 3, waf_tools_file)
indent_text('],', 2, waf_tools_file)
indent_text(']', 1, waf_tools_file)
indent_text('} ', 0, waf_tools_file)
except Exception as err:
self.fatal(str(err))
# generate the android local properties file
local_props = android_root.make_node('local.properties')
try:
with open(local_props.abspath(), 'w') as props_file:
sdk_path = os.path.normpath(self.get_env_file_var('LY_ANDROID_SDK'))
ndk_path = os.path.normpath(self.get_env_file_var('LY_ANDROID_NDK'))
                # Windows is really picky about its file paths
if Utils.unversioned_sys_platform() == "win32":
sdk_path = sdk_path.replace('\\', '\\\\').replace(':', '\\:')
ndk_path = ndk_path.replace('\\', '\\\\').replace(':', '\\:')
props_file.write('sdk.dir={}\n'.format(sdk_path))
props_file.write('ndk.dir={}\n'.format(ndk_path))
except Exception as err:
self.fatal(str(err))
# generate the gradle properties file
gradle_props = android_root.make_node('gradle.properties')
try:
with open(gradle_props.abspath(), 'w') as props_file:
props_file.write(GRADLE_PROPERTIES)
except Exception as err:
self.fatal(str(err))
Logs.pprint('CYAN','[INFO] Created at %s' % android_root.abspath())
################################
def process_module(self, project, target_name, android_root, module_type, task_generator):
'''
        Adds the module to the project. If it's an application, it also parses the
        JSON file for the Android libraries and adds those modules to the project.
'''
if module_type == ModelType.Application:
class _DummyTaskGenerator(object):
def set_task_attribute(self, name, attr):
if attr:
setattr(self, name, attr)
# Generate all the targets for the Android libraries
java_libs_json = self.root.make_node(getattr(task_generator, 'android_java_libs', []))
json_data = self.parse_json_file(java_libs_json)
if json_data:
app_dependancy = []
for libName, value in json_data.iteritems():
new_task_generator = _DummyTaskGenerator()
# Check if the library was patched. If so, we need to look in a different folder.
if 'patches' in value:
lib_path = os.path.join(project.ctx.Path(project.ctx.get_android_patched_libraries_relative_path()), libName)
else:
# Search the multiple library paths where the library can be.
lib_path = None
for path in value['srcDir']:
path = string.Template(path).substitute(self.env)
path = path.replace('\\', '/')
if os.path.exists(path):
lib_path = path
break
if not lib_path:
project.ctx.fatal('[ERROR] Failed to find library - %s - in path(s) [%s]. Please download the library from the Android SDK Manager and run the configure command again.'
% (libName, ", ".join(string.Template(path).substitute(self.env) for path in value['srcDir'])))
new_task_generator.set_task_attribute('path', self.path)
new_task_generator.set_task_attribute('android_java_src_path', os.path.join(lib_path, 'src'))
new_task_generator.set_task_attribute('android_res_path', os.path.join(lib_path, 'res'))
new_task_generator.set_task_attribute('android_manifest_path', lib_path)
new_task_generator.set_task_attribute('modules_dependency', value.get('dependancy', None))
if value.get('launcherDependency'):
app_dependancy.append(libName)
if value.get('libs'):
# Get any java libs that are needed
files_dep = []
for java_lib in value['libs']:
file_path = string.Template(java_lib['path']).substitute(self.env)
file_path = file_path.replace('\\', '/')
if os.path.exists(file_path):
files_dep.append(file_path)
elif java_lib['required']:
project.ctx.fatal("[ERROR] Required java lib [%s] was not found" % file_path)
new_task_generator.set_task_attribute('files_dependency', files_dep)
project.add_target_to_project(libName, ModelType.Android_lib, new_task_generator)
setattr(task_generator, 'modules_dependency', app_dependancy)
project.add_target_to_project(target_name, module_type, task_generator)
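# Usage sketch (assumed lmbr_waf invocation, not defined in this module):
#   lmbr_waf android_studio --project-spec=all
# execute() above restores the configured environments and writes settings.gradle,
# the per-module build.gradle files, waf.gradle, local.properties and
# gradle.properties into the android project folder created by the configure step.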
|
the-stack_106_17810
|
# -*- coding: utf-8 -*-
import sys
import logging
try:
from cStringIO import StringIO # NOQA
except ImportError:
from io import StringIO # NOQA
try:
import importlib # NOQA
except ImportError:
from django.utils import importlib # NOQA
from django.core.management import call_command
from django.test import TestCase
class MockLoggingHandler(logging.Handler):
""" Mock logging handler to check for expected logs. """
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
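# Illustrative usage (assumption; mirrors test_error_logging below):
#   handler = MockLoggingHandler()
#   logger.addHandler(handler)
#   ...exercise code that logs an error...
#   assert len(handler.messages['error']) == 1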
class CommandTest(TestCase):
def test_error_logging(self):
# Ensure command errors are properly logged and reraised
from django_extensions.management.base import logger
logger.addHandler(MockLoggingHandler())
module_path = "django_extensions.tests.management.commands.error_raising_command"
module = importlib.import_module(module_path)
error_raising_command = module.Command()
self.assertRaises(Exception, error_raising_command.execute)
handler = logger.handlers[0]
self.assertEqual(len(handler.messages['error']), 1)
class ShowTemplateTagsTests(TestCase):
def test_some_output(self):
out = StringIO()
call_command('show_templatetags', stdout=out)
output = out.getvalue()
        # Once django_extensions is installed during tests it should appear
        # with its templatetags
self.assertIn('django_extensions', output)
# let's check at least one
self.assertIn('truncate_letters', output)
class UpdatePermissionsTests(TestCase):
def test_works(self):
from django.db import models
class PermModel(models.Model):
class Meta:
app_label = 'django_extensions'
permissions = (('test_permission', 'test_permission'),)
original_stdout = sys.stdout
out = sys.stdout = StringIO()
call_command('update_permissions', stdout=out, verbosity=3)
sys.stdout = original_stdout
self.assertIn("Can change perm model", out.getvalue())
class CommandSignalTests(TestCase):
pre = None
post = None
def test_works(self):
from django_extensions.management.signals import post_command, \
pre_command
from django_extensions.management.commands.show_templatetags import \
Command
def pre(sender, **kwargs):
CommandSignalTests.pre = dict(**kwargs)
def post(sender, **kwargs):
CommandSignalTests.post = dict(**kwargs)
pre_command.connect(pre, Command)
post_command.connect(post, Command)
out = StringIO()
call_command('show_templatetags', stdout=out)
self.assertIn('args', CommandSignalTests.pre)
self.assertIn('kwargs', CommandSignalTests.pre)
self.assertIn('args', CommandSignalTests.post)
self.assertIn('kwargs', CommandSignalTests.post)
self.assertIn('outcome', CommandSignalTests.post)
|
the-stack_106_17813
|
"""Data anylisis of a restaurant order data"""
data = [
{'order_id': '355c96f5-944e-4ef6-977b-6972df7b8f93', 'price': 3000,
'customer': 'Gerrie Killshaw', 'type': 'takeaway', 'district': None, 'note': None, 'review': 5},
{'order_id': 'ebc2b077-d18a-492d-86b7-81756205fe29', 'price': 2100, 'customer': 'Kristan Northway',
'type': 'dine-in', 'district': None, 'note': None, 'review': None},
{'order_id': '68a74baf-03fe-46ca-a3c4-88972a43efbc', 'price': 3400, 'customer': 'Jodee Spinas',
'type': 'takeaway', 'district': None, 'note': None, 'review': None},
{'order_id': 'e53b4a58-c189-4bbb-b549-99bdbe0370f9', 'price': 3400,
'customer': 'Rozalie Henrique', 'type': 'dine-in', 'district': None, 'note': None, 'review': 5},
{'order_id': '47afe099-8744-42e9-971b-0a9e3c3f11c6', 'price': 3500, 'customer': 'Ignacius Broadis',
'type': 'delivery', 'district': 'VIII', 'note': None, 'review': 4},
{'order_id': '187b9e0c-4f36-458b-8fe9-d206a6844b11', 'price': 2000,
'customer': 'Bearnard Allchin', 'type': 'takeaway', 'district': None, 'note': None, 'review': 1},
{'order_id': '98ce7a47-6405-4b90-a71e-34b4272774bc', 'price': 2600,
'customer': 'Denis Mc Caughan', 'type': 'dine-in', 'district': None, 'note': None, 'review': 3},
{'order_id': 'f1d937d3-c1e2-4569-8a99-187fe3f65c6e', 'price': 1400, 'customer': 'Batsheva Emanuelov',
'type': 'takeaway', 'district': None, 'note': None, 'review': None},
{'order_id': 'e875317e-94f2-417a-bff7-5ae783087ab0', 'price': 2400,
'customer': 'Yevette Lewisham', 'type': 'takeaway', 'district': None, 'note': None, 'review': 3},
{'order_id': '89901429-1ebf-490e-a9e0-75c3f9c4ce0d', 'price': 11000,
'customer': 'Ramona Norvell', 'type': 'dine-in', 'district': None, 'note': None, 'review': 2},
{'order_id': '272af23c-bfe9-4228-ac7c-ace5c4d6271a', 'price': 1400, 'customer': 'Blondell Klimko',
'type': 'dine-in', 'district': None, 'note': 'ketchup', 'review': 3},
{'order_id': 'd686e01d-2ca9-45f4-ba11-1a3556a5981e', 'price': 3100,
'customer': 'Becki Mettericke', 'type': 'dine-in', 'district': None, 'note': None, 'review': 2},
{'order_id': '855efeac-c6f0-4a4b-a5ae-b492db5354e0', 'price': 2000, 'customer': 'Maura Aspey',
'type': 'delivery', 'district': 'VIII', 'note': None, 'review': None},
{'order_id': '2fa0a6ca-172f-47ff-b8c8-67e6e99d0d74', 'price': 1300,
'customer': 'Babara Bolitho', 'type': 'takeaway', 'district': None, 'note': None, 'review': 3},
{'order_id': '8ffa92ba-e5b7-419b-9d73-c57cb13d74a7', 'price': 2600, 'customer': 'Kenneth Southby',
'type': 'delivery', 'district': 'VII', 'note': None, 'review': 4},
{'order_id': 'b55ddbc1-e841-46c0-9ff4-395fdd2b4935', 'price': 1300,
'customer': 'Grantham Bolley', 'type': 'takeaway', 'district': None, 'note': None, 'review': 1},
{'order_id': '762505e8-5704-411e-8000-5b029d4d9103', 'price': 12000,
'customer': 'Lauraine Boss', 'type': 'delivery', 'district': 'VI', 'note': None, 'review': 3},
{'order_id': 'e9f475fa-f04c-4ef9-a75f-f28bc128b44a', 'price': 2700, 'customer': 'Ivette McElmurray',
'type': 'dine-in', 'district': None, 'note': None, 'review': None},
{'order_id': '192004d5-5708-489a-9697-3a43537a2ec9', 'price': 11100,
'customer': 'Cecilius Downage', 'type': 'dine-in', 'district': None, 'note': None, 'review': 3},
{'order_id': '007e649f-99ef-4cda-942a-ba7e636c95d6', 'price': 2200, 'customer': 'Winne Spurgin',
'type': 'dine-in', 'district': None, 'note': 'ketchup', 'review': 2},
{'order_id': '516d6688-182d-45a9-bab6-e6e8a972def9', 'price': 3400, 'customer': 'Marlie Towlson',
'type': 'takeaway', 'district': None, 'note': 'több saláta', 'review': None},
{'order_id': 'd2e0854e-fe17-4bfa-88d3-aced4e249c71', 'price': 2100,
'customer': 'Shea Fance', 'type': 'dine-in', 'district': None, 'note': None, 'review': 1},
{'order_id': 'f5f05bfd-c04e-49fa-ae76-1e0dbe3756ad', 'price': 3200,
'customer': 'Ivan Walrond', 'type': 'delivery', 'district': 'VI', 'note': None, 'review': 3},
{'order_id': '316984c0-8340-469d-9a7f-44e103bd9feb', 'price': 1800,
'customer': 'Lauraine Boss', 'type': 'dine-in', 'district': None, 'note': None, 'review': 1},
{'order_id': '112fddf6-a32a-4f06-9e16-a18282a3ed45', 'price': 9600, 'customer': 'Cathe Triner',
'type': 'delivery', 'district': 'XIV', 'note': None, 'review': None},
{'order_id': 'a08d59be-1a36-4f26-a831-4b5c65a8f304', 'price': 6200, 'customer': 'Quintina Belderfield',
'type': 'takeaway', 'district': None, 'note': None, 'review': None},
{'order_id': 'dde29033-eb2d-4de9-a8ac-80d177729531', 'price': 1400,
'customer': 'Lexine Padbery', 'type': 'delivery', 'district': 'VI', 'note': None, 'review': 4},
{'order_id': '3cc7d813-49a2-46bb-8e75-e2304cdb3a3f', 'price': 9900, 'customer': 'Hillie Tanton',
'type': 'dine-in', 'district': None, 'note': None, 'review': None},
{'order_id': 'b58f63f4-7d72-4a45-9309-83702ba4163d', 'price': 2800, 'customer': 'Ag Sabathe',
'type': 'delivery', 'district': 'VII', 'note': 'hagyma nélkül', 'review': None},
{'order_id': '03e3ac5f-2795-4147-9732-a6ff11066c44', 'price': 6800, 'customer': 'Sybilla Albin',
'type': 'delivery', 'district': 'VI', 'note': None, 'review': None},
{'order_id': 'e512e8b3-849c-4c09-8b78-1009e3eeb407', 'price': 1700, 'customer': 'Hubert Vereker',
'type': 'takeaway', 'district': None, 'note': 'több saláta', 'review': 2},
{'order_id': '73a0ddbb-134d-4bb3-aa29-482018ba8471', 'price': 1600, 'customer': 'Courtnay Sillick',
'type': 'takeaway', 'district': None, 'note': None, 'review': None},
{'order_id': '097b5fd8-b2fe-4b5b-a486-4cb2abb105a8', 'price': 9400,
'customer': 'Sibel Fanthom', 'type': 'dine-in', 'district': None, 'note': None, 'review': 5},
{'order_id': '8b638f8b-b587-4c73-9d4d-328dcc14fe1f', 'price': 3100, 'customer': 'Kiersten McPeeters',
'type': 'takeaway', 'district': None, 'note': None, 'review': None},
{'order_id': '247b6998-e66d-4843-9288-e0face7319a0', 'price': 1900, 'customer': 'Emory Meader',
'type': 'delivery', 'district': 'VIII', 'note': None, 'review': None},
{'order_id': 'b2ca2f74-7137-47af-80d2-b6e8ebc21bcd', 'price': 6800, 'customer': 'Upton Scroxton',
'type': 'dine-in', 'district': None, 'note': None, 'review': None},
{'order_id': '56b91e64-4692-4aeb-b54f-6e6c206b6ad8', 'price': 2400,
'customer': 'Phaidra Etoile', 'type': 'dine-in', 'district': None, 'note': None, 'review': 3},
{'order_id': 'c9d3af1a-ae22-46a5-b583-b0c22bd42d60', 'price': 6900,
'customer': 'Gloriane Dumbarton', 'type': 'dine-in', 'district': None, 'note': None, 'review': 1},
{'order_id': '47474c8a-58c1-4710-8e92-0f19ebb17b77', 'price': 11200,
'customer': 'Dusty Curton', 'type': 'delivery', 'district': 'VI', 'note': None, 'review': 1},
{'order_id': '6d76cbaf-dd9e-4953-a2ff-3d2719977367', 'price': 9600,
'customer': 'Raven McKeaney', 'type': 'takeaway', 'district': None, 'note': None, 'review': 3},
{'order_id': 'c2ca011a-0606-45dd-90f1-cfd5895cab8b', 'price': 8100,
'customer': 'Caril Olsen', 'type': 'dine-in', 'district': None, 'note': None, 'review': 2},
{'order_id': 'ca055d4f-f2b5-45cf-8155-a36b1a518c25', 'price': 3300,
'customer': 'Yvonne Ricci', 'type': 'delivery', 'district': 'VI', 'note': None, 'review': 4},
{'order_id': '569b7e04-5aee-4b44-905f-94adbb2ee2aa', 'price': 2400, 'customer': 'Donielle Teather',
'type': 'takeaway', 'district': None, 'note': 'hagyma nélkül', 'review': 1},
{'order_id': 'e6424af3-4f47-407a-a31a-2f3c4d466e30', 'price': 2100,
'customer': 'Shellysheldon Cristofori', 'type': 'dine-in', 'district': None, 'note': None, 'review': 2},
{'order_id': '464929c1-462b-4a20-910f-3b8d2232e806', 'price': 4100,
'customer': 'Alonso Butter', 'type': 'delivery', 'district': 'VI', 'note': None, 'review': 4},
{'order_id': '6a6f6ee5-9188-47f4-a7d8-59d72f176981', 'price': 3400,
'customer': 'Olivette Aronsohn', 'type': 'takeaway', 'district': None, 'note': None, 'review': 3},
{'order_id': '96d674fc-9bd1-40c2-8d15-3983389e8e4b', 'price': 8400,
'customer': 'Tommie Jendrich', 'type': 'dine-in', 'district': None, 'note': None, 'review': 4},
{'order_id': 'be27358d-389a-4f59-9eb6-2b35a0c45de1', 'price': 1800,
'customer': 'Astrid Robens', 'type': 'takeaway', 'district': None, 'note': None, 'review': 4},
{'order_id': 'fef27b8f-ca1e-4dfe-9bc3-5fb20974bed2', 'price': 7900, 'customer': 'Gaby McChruiter',
'type': 'dine-in', 'district': None, 'note': None, 'review': None},
{'order_id': '1844074a-7082-4c36-85c9-ea0817a10678', 'price': 1800, 'customer': 'Carey Labroue',
'type': 'delivery', 'district': 'VII', 'note': None, 'review': 2},
{'order_id': '121aa53d-365b-42d4-9b99-cc2f5b22a412', 'price': 6200, 'customer': 'Adina Chalice',
'type': 'takeaway', 'district': None, 'note': 'több saláta', 'review': 4},
{'order_id': 'dd30091d-4d28-473d-b915-adfea304d4d3', 'price': 2700, 'customer': 'Bennett Prando',
'type': 'takeaway', 'district': None, 'note': 'hagyma nélkül', 'review': 4},
{'order_id': 'c435ab18-22ee-4d4e-87b6-865a4a5bca11', 'price': 1700,
'customer': 'Gaylor Scafe', 'type': 'dine-in', 'district': None, 'note': None, 'review': 3},
{'order_id': '9426ed24-0c2f-4b49-a23e-1c091247bd89', 'price': 1900,
'customer': 'Cristobal Tremmil', 'type': 'takeaway', 'district': None, 'note': None, 'review': 4},
{'order_id': 'c9ffbf30-0835-4420-aeca-8f89ebf24285', 'price': 3200, 'customer': 'Serena Woolaghan',
'type': 'dine-in', 'district': None, 'note': 'hagyma nélkül', 'review': None},
{'order_id': '0938b4df-ce2e-4b54-a971-165d9f426e2a', 'price': 11600, 'customer': 'Demetris Herrema',
'type': 'delivery', 'district': 'VIII', 'note': None, 'review': 4},
{'order_id': '3dcfe363-41d8-4442-a644-aa26c027a6d1', 'price': 4400, 'customer': 'Amalle Briscam',
'type': 'takeaway', 'district': None, 'note': 'több saláta', 'review': None},
{'order_id': '54c42050-68b9-4ade-b8bc-8fe2e72750bd', 'price': 2400,
'customer': 'Bax Lovegrove', 'type': 'dine-in', 'district': None, 'note': None, 'review': 4},
{'order_id': 'd430e521-a26f-4dbe-bc61-4b24c716653d', 'price': 12100,
'customer': 'Noe Ruggen', 'type': 'takeaway', 'district': None, 'note': None, 'review': 1},
{'order_id': '55277530-5de3-4910-a835-7cf54345fd03', 'price': 2100, 'customer': 'Mara Hasslocher',
'type': 'delivery', 'district': 'VIII', 'note': None, 'review': None},
{'order_id': '118ec29e-ed50-406b-8d19-ed3eba2467e2', 'price': 3500, 'customer': 'Jareb Allsup',
'type': 'dine-in', 'district': None, 'note': 'több saláta', 'review': 5},
{'order_id': 'b090cb47-f597-400e-b92b-776b7bc2f25e', 'price': 3200, 'customer': 'Mathilde Dudeney',
'type': 'delivery', 'district': 'VI', 'note': 'több hús', 'review': None},
{'order_id': '5901e0c5-6853-4e97-9306-9727b8966fe3', 'price': 2900,
'customer': 'Sibel Fanthom', 'type': 'delivery', 'district': 'VI', 'note': None, 'review': 5},
{'order_id': '1d7aaa77-1320-42b8-b20a-a675992c5abe', 'price': 6600, 'customer': 'Remus Wynn',
'type': 'delivery', 'district': 'VI', 'note': 'hagyma nélkül', 'review': 1},
{'order_id': '589d21d2-9078-47ba-b5d9-180b40d9a71a', 'price': 3100, 'customer': 'Charlean Lamburne',
'type': 'dine-in', 'district': None, 'note': None, 'review': None},
{'order_id': '669a2604-202c-4349-b713-9e4a60588268', 'price': 4900,
'customer': 'Johny Adenot', 'type': 'dine-in', 'district': None, 'note': None, 'review': 1},
{'order_id': '7b972af5-c5bd-4d27-9daf-66d505e303b5', 'price': 1600, 'customer': 'Meier Englefield',
'type': 'delivery', 'district': 'VII', 'note': 'hagyma nélkül', 'review': 4},
{'order_id': 'bd70b570-33f8-4141-9aa9-2a96e22c3734', 'price': 5500, 'customer': 'Dicky Verrell',
'type': 'dine-in', 'district': None, 'note': 'több saláta', 'review': None},
{'order_id': '00894631-52e9-486e-8e4d-a0fd9ee7af7a', 'price': 2800,
'customer': 'Pippa Yare', 'type': 'dine-in', 'district': None, 'note': None, 'review': 2},
{'order_id': 'ca121a8f-5304-4ebd-b8ad-11d417c37f83', 'price': 5500, 'customer': 'Bennett Goosnell',
'type': 'delivery', 'district': 'VII', 'note': None, 'review': None},
{'order_id': 'a3403920-0a88-4c2c-9c75-5e17ad8611c1', 'price': 12000,
'customer': 'Stanislas Gercken', 'type': 'takeaway', 'district': None, 'note': None, 'review': 4},
{'order_id': '7aba235b-2554-4c0a-bef6-da81f2e1f6c2', 'price': 2000, 'customer': 'Arlina Frigout',
'type': 'dine-in', 'district': None, 'note': None, 'review': None},
{'order_id': '572ee99b-a690-44f4-9878-7fd50de815c4', 'price': 7300,
'customer': 'Joe Bendix', 'type': 'dine-in', 'district': None, 'note': None, 'review': 2},
{'order_id': '4a9146cd-ab82-46d0-b331-febada5f77e6', 'price': 4100, 'customer': 'Early Teacy',
'type': 'dine-in', 'district': None, 'note': 'több saláta', 'review': 5},
{'order_id': '6764ad73-75ad-43f9-82bf-8e2d98519977', 'price': 1300, 'customer': 'Kalvin Benjamin',
'type': 'takeaway', 'district': None, 'note': 'több saláta', 'review': 4},
{'order_id': '3fe615c2-caf0-4e06-adb3-c9634baab332', 'price': 4000,
'customer': 'Mallory Oris', 'type': 'dine-in', 'district': None, 'note': None, 'review': 5},
{'order_id': '0b193c82-fc3d-4cb2-8b6b-aa22be6ebe69', 'price': 2200, 'customer': 'Rob Searby',
'type': 'delivery', 'district': 'VIII', 'note': None, 'review': None},
{'order_id': '6ae03738-5cf4-423f-a024-a67dae3ff067', 'price': 6900,
'customer': 'Carmen Garfirth', 'type': 'dine-in', 'district': None, 'note': None, 'review': 5},
{'order_id': '77e70aa3-848c-4347-ad1d-8e6247348870', 'price': 12100,
'customer': 'Sela Gannicott', 'type': 'takeaway', 'district': None, 'note': None, 'review': 1},
{'order_id': '389afe2c-9f8b-4d50-8e18-79a9ad5f3ea4', 'price': 2300, 'customer': 'Heinrick Zanioletti',
'type': 'delivery', 'district': 'VI', 'note': 'ketchup', 'review': None},
{'order_id': '946031fd-c648-41e6-aeaf-517de87b7c9b', 'price': 4900, 'customer': 'Ewan Jehan',
'type': 'delivery', 'district': 'VII', 'note': None, 'review': 4},
{'order_id': 'e5113f07-1606-4eab-9914-4775b23a0efb', 'price': 1900,
'customer': 'Kevin Bartrap', 'type': 'dine-in', 'district': None, 'note': None, 'review': 4},
{'order_id': '44c6079e-7d1d-4d52-964d-a81def49f1de', 'price': 3100, 'customer': 'Delmor Trett',
'type': 'delivery', 'district': 'VI', 'note': 'hagyma nélkül', 'review': 4},
{'order_id': '687caf8b-51fe-47f8-bdf5-c4cd5599b7ef', 'price': 11900,
'customer': 'Bradly Dillinger', 'type': 'takeaway', 'district': None, 'note': None, 'review': 3},
{'order_id': '67936966-32d5-4e60-a355-05cf347e9910', 'price': 2400,
'customer': 'Mallory Oris', 'type': 'takeaway', 'district': None, 'note': None, 'review': 3},
{'order_id': 'df545257-c4e6-4a90-b784-c94596d6c1aa', 'price': 4200, 'customer': 'Elianore Pittoli',
'type': 'takeaway', 'district': None, 'note': None, 'review': None},
{'order_id': '0759e5c0-44c9-46e4-8f18-3609039a3985', 'price': 7700, 'customer': 'Mavra Sherington',
'type': 'delivery', 'district': 'XIV', 'note': None, 'review': 3},
{'order_id': '77182682-0da0-4d5f-9d9c-3d1375adcf85', 'price': 1700,
'customer': 'Roman Terzi', 'type': 'delivery', 'district': 'VI', 'note': None, 'review': 3},
{'order_id': 'a470f976-15a4-436f-9c5a-d3525e3a86e5', 'price': 2300,
'customer': 'Stephen Pratten', 'type': 'takeaway', 'district': None, 'note': None, 'review': 4},
{'order_id': 'e3ab88e3-fd6c-44c8-8d34-e25ed7989396', 'price': 2600, 'customer': 'Sarajane Armall',
'type': 'delivery', 'district': 'VII', 'note': None, 'review': 3},
{'order_id': 'a24dc7a5-851d-44ea-a50b-4856a4007fdc', 'price': 3300,
'customer': 'Carmen Garfirth', 'type': 'takeaway', 'district': None, 'note': None, 'review': 4},
{'order_id': '2f720703-3748-49a3-8f2e-393d43eec633', 'price': 3000, 'customer': 'Edin Truckell',
'type': 'delivery', 'district': 'VI', 'note': 'több saláta', 'review': 3},
{'order_id': '42231dab-595c-4b24-a9dc-842d7de8a9c4', 'price': 2400,
'customer': 'Linoel Gorgler', 'type': 'takeaway', 'district': None, 'note': None, 'review': 1},
{'order_id': 'e671173d-f238-4ac6-bfaf-22c7b223072f', 'price': 2400, 'customer': 'Alicea Catlette',
'type': 'dine-in', 'district': None, 'note': None, 'review': None},
{'order_id': '3c802dbe-9f56-44ba-9fc3-51d85f3b65e8', 'price': 1500, 'customer': 'Garreth Gepson',
'type': 'delivery', 'district': 'VI', 'note': None, 'review': None},
{'order_id': '5106156e-e53b-44cb-af71-86ebb176fe34', 'price': 3300,
'customer': 'Yance Chate', 'type': 'dine-in', 'district': None, 'note': None, 'review': 3},
{'order_id': 'b21ac870-e53e-42b8-b1e0-b591e3a77f96', 'price': 5900, 'customer': 'Jeanna Deyenhardt',
'type': 'delivery', 'district': 'VII', 'note': 'ketchup', 'review': 4},
{'order_id': 'd8c90b04-ad6a-4884-9160-1196e81ea311', 'price': 3200, 'customer': 'Connor Applewhaite',
'type': 'delivery', 'district': 'VI', 'note': None, 'review': None},
{'order_id': '6bd4ed52-0abb-4df7-98fa-83662487ad1a', 'price': 11300, 'customer': 'Renado Bausor',
'type': 'delivery', 'district': 'VIII', 'note': None, 'review': None},
{'order_id': '0553bfc6-c053-480b-a754-8352c88783da', 'price': 2300, 'customer': 'Jeanna Deyenhardt',
'type': 'takeaway', 'district': None, 'note': None, 'review': 4}
]
|
the-stack_106_17814
|
import argparse
import os
import os.path as osp
import torch
import mmcv
from mmaction.apis import init_recognizer
from mmcv.parallel import collate, scatter
from mmaction.datasets.pipelines import Compose
from mmaction.datasets import build_dataloader, build_dataset
from mmcv.parallel import MMDataParallel
import numpy as np
from tqdm import tqdm
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
def parse_args():
"""
experiments/baseline_rpl.py --config configs/recognition/tsm/inference_tsm_rpl.py \
--checkpoint work_dirs/tsm/finetune_ucf101_tsm_rpl/latest.pth \
--train_data data/ucf101/ucf101_train_split_1_videos.txt \
--ind_data
--result_prefix experiments/tsm/results_baselines/rpl/RPL
"""
parser = argparse.ArgumentParser(description='MMAction2 test')
# model and data config
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file/url')
parser.add_argument('--train_data', help='the split file of in-distribution training data')
parser.add_argument('--batch_size', type=int, default=8, help='the testing batch size')
# test data config
parser.add_argument('--ind_data', help='the split file of in-distribution testing data')
parser.add_argument('--ood_data', help='the split file of out-of-distribution testing data')
    parser.add_argument('--ood_ncls', type=int, help='the number of classes in the unknown dataset')
parser.add_argument('--ood_dataname', choices=['HMDB', 'MiT'], help='the name of out-of-distribution testing data')
# env config
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument('--result_prefix', help='result file prefix')
args = parser.parse_args()
return args
def set_deterministic(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
np.random.seed(seed) # Numpy module.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
def run_inference(config, checkpoint, data_split, batch_size, device):
# initialize recognition model
model = init_recognizer(config, checkpoint, device=device, use_frames=False)
torch.backends.cudnn.benchmark = True
model.cfg.data.test.test_mode = True
    model.cfg.test_cfg.average_clips = 'prob'  # we need the probability score from the softmax layer
model.cfg.data.videos_per_gpu = batch_size # batch size
model.cfg.data.test.ann_file = data_split
model.cfg.data.test.data_prefix = os.path.join(os.path.dirname(data_split), 'videos')
# build the dataloader
dataset = build_dataset(model.cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=model.cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=model.cfg.data.get('workers_per_gpu', 1),
dist=False,
shuffle=False,
pin_memory=False)
dataloader_setting = dict(dataloader_setting, **model.cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
# running the inference
model = MMDataParallel(model, device_ids=[0])
all_scores, all_labels = [], []
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
scores = model(return_loss=False, **data) # (B, C)
all_scores.append(scores)
# gather labels
labels = data['label'].numpy()
all_labels.append(labels)
# use the first key as main key to calculate the batch size
bs = len(next(iter(data.values())))
for _ in range(bs):
prog_bar.update()
all_scores = np.concatenate(all_scores, axis=0)
all_labels = np.concatenate(all_labels, axis=0)
return all_scores, all_labels
def evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, ood_ncls, thresh, num_rand=10):
ind_ncls = ind_softmax.shape[1]
ind_results = np.argmax(ind_softmax, axis=1)
ood_results = np.argmax(ood_softmax, axis=1)
# close-set accuracy (multi-class)
acc = accuracy_score(ind_labels, ind_results)
# open-set auc-roc (binary class)
ind_conf = np.max(ind_softmax, axis=1)
ood_conf = np.max(ood_softmax, axis=1)
preds = np.concatenate((ind_results, ood_results), axis=0)
confs = np.concatenate((ind_conf, ood_conf), axis=0)
    preds[confs < thresh] = 1   # unknown class
    preds[confs >= thresh] = 0  # known class
labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels)))
auc = roc_auc_score(labels, preds)
print('\nClosedSet Accuracy (multi-class): %.3lf, OpenSet AUC (bin-class): %.3lf'%(acc * 100, auc * 100))
ind_results[ind_conf < thresh] = ind_ncls # incorrect rejection
# open set F1 score (multi-class)
macro_F1_list = [f1_score(ind_labels, ind_results, average='macro')]
std_list = [0]
openness_list = [0]
for n in range(ood_ncls):
ncls_novel = n + 1
openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
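        # e.g. with ind_ncls = 101 and ncls_novel = 1: openness = (1 - sqrt(202 / 203)) * 100 ≈ 0.25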
openness_list.append(openness)
        # randomly select the subset of ood samples
macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
for m in range(num_rand):
cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
ood_sub_confs = np.concatenate([ood_conf[ood_labels == clsid] for clsid in cls_select])
ood_sub_results[ood_sub_confs < thresh] = ind_ncls # correct rejection
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi)
std = np.std(macro_F1_multi)
macro_F1_list.append(macro_F1)
std_list.append(std)
# draw comparison curves
macro_F1_list = np.array(macro_F1_list)
std_list = np.array(std_list)
w_openness = np.array(openness_list) / 100.
open_maF1_mean = np.sum(w_openness * macro_F1_list) / np.sum(w_openness)
open_maF1_std = np.sum(w_openness * std_list) / np.sum(w_openness)
print('Open macro-F1 score: %.3f, std=%.3lf'%(open_maF1_mean * 100, open_maF1_std * 100))
return openness_list, macro_F1_list, std_list
if __name__ == '__main__':
args = parse_args()
# assign the desired device.
device = torch.device(args.device)
set_deterministic(0)
modelname = os.path.dirname(args.config).split('/')[-1].upper()
######## Compute threshold with training data ########
result_file = os.path.join(os.path.dirname(args.result_prefix), modelname + '_RPL_trainset_softmax.npz')
if not os.path.exists(result_file):
# prepare result path
result_dir = os.path.dirname(result_file)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# run the inference on training data
trainset_scores, _ = run_inference(args.config, args.checkpoint, args.train_data, args.batch_size, device)
# save
np.savez(result_file[:-4], trainset_scores=trainset_scores)
else:
result = np.load(result_file)
trainset_scores = result['trainset_scores']
max_scores = np.max(trainset_scores, axis=1)
    scores_sort = np.sort(max_scores)[::-1]  # sort the max softmax scores in descending order
N = max_scores.shape[0]
threshold = scores_sort[int(N * 0.95)-1] # 95% percentile
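    # e.g. with N = 1000 training clips, int(N * 0.95) - 1 = 949 indexes the 950th largest
    # max-softmax score, so roughly 95% of the training samples score at or above the threshold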
print('\nThe RPL softmax threshold on UCF-101 train set: %lf'%(threshold))
######## OOD and IND detection ########
testset_result = os.path.join(os.path.dirname(args.result_prefix), modelname +'_RPL_'+ args.ood_dataname +'_result.npz')
if not os.path.exists(testset_result):
# prepare result path
result_dir = os.path.dirname(testset_result)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# run the inference on OOD data
ood_softmax, ood_labels = run_inference(args.config, args.checkpoint, args.ood_data, args.batch_size, device)
# run the inference on IND data
ind_softmax, ind_labels = run_inference(args.config, args.checkpoint, args.ind_data, args.batch_size, device)
# save
np.savez(testset_result[:-4], ind_softmax=ind_softmax, ood_softmax=ood_softmax,
ind_label=ind_labels, ood_label=ood_labels)
else:
results = np.load(testset_result, allow_pickle=True)
ind_softmax = results['ind_softmax'] # (N1, C)
ood_softmax = results['ood_softmax'] # (N2, C)
ind_labels = results['ind_label'] # (N1,)
ood_labels = results['ood_label'] # (N2,)
openness_list, macro_F1_list, std_list = evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, args.ood_ncls, threshold)
|
the-stack_106_17815
|
import functools
from django import http
from django.core.exceptions import PermissionDenied
from amo.decorators import login_required
from access import acl
from addons.decorators import addon_view
from devhub.models import SubmitStep
def dev_required(owner_for_post=False, allow_editors=False, theme=False):
"""Requires user to be add-on owner or admin.
When allow_editors is True, an editor can view the page.
"""
def decorator(f):
@addon_view
@login_required
@functools.wraps(f)
def wrapper(request, addon, *args, **kw):
from devhub.views import _resume
if theme:
kw['theme'] = addon.is_persona()
elif addon.is_persona():
# Don't allow theme views if theme not passed in.
raise http.Http404
fun = lambda: f(request, addon_id=addon.id, addon=addon,
*args, **kw)
if allow_editors:
if acl.check_reviewer(request):
return fun()
# Require an owner or dev for POST requests.
if request.method == 'POST':
if acl.check_addon_ownership(request, addon,
dev=not owner_for_post):
return fun()
# Ignore disabled so they can view their add-on.
elif acl.check_addon_ownership(request, addon, viewer=True,
ignore_disabled=True):
step = SubmitStep.objects.filter(addon=addon)
# Redirect to the submit flow if they're not done.
if not getattr(f, 'submitting', False) and step:
return _resume(addon, step)
return fun()
raise PermissionDenied
return wrapper
# The arg will be a function if they didn't pass owner_for_post.
if callable(owner_for_post):
f = owner_for_post
owner_for_post = False
return decorator(f)
else:
return decorator
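# A hypothetical usage sketch (not part of this module; the view names are made up):
# the decorator can be applied bare or with arguments, e.g.
#
#   @dev_required
#   def edit_basic(request, addon_id, addon):
#       ...
#
#   @dev_required(owner_for_post=True)
#   def payments(request, addon_id, addon):
#       ...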
|
the-stack_106_17817
|
"""
https://matplotlib.org/stable/gallery/style_sheets/style_sheets_reference.html
"""
import re
import matplotlib.pyplot as plt
import numpy as np
# Fixing random state for reproducibility
np.random.seed(19680801)
def plot_scatter(ax, prng, nb_samples=100):
"""Scatter plot."""
for mu, sigma, marker in [(-0.5, 0.75, "o"), (0.75, 1.0, "s")]:
x, y = prng.normal(loc=mu, scale=sigma, size=(2, nb_samples))
ax.plot(x, y, ls="none", marker=marker)
ax.set_xlabel("X-label")
ax.set_title("Axes title")
return ax
def plot_colored_sinusoidal_lines(ax):
"""Plot sinusoidal lines with colors following the style color cycle."""
L = 2 * np.pi
x = np.linspace(0, L)
nb_colors = len(plt.rcParams["axes.prop_cycle"])
shift = np.linspace(0, L, nb_colors, endpoint=False)
for s in shift:
ax.plot(x, np.sin(x + s), "-")
ax.set_xlim([x[0], x[-1]])
return ax
def plot_bar_graphs(ax, prng, min_value=5, max_value=25, nb_samples=5):
"""Plot two bar graphs side by side, with letters as x-tick labels."""
x = np.arange(nb_samples)
ya, yb = prng.randint(min_value, max_value, size=(2, nb_samples))
width = 0.25
ax.bar(x, ya, width)
ax.bar(x + width, yb, width, color="C2")
ax.set_xticks(x + width)
ax.set_xticklabels(["a", "b", "c", "d", "e"])
return ax
def plot_colored_circles(ax, prng, nb_samples=15):
"""
Plot circle patches.
NB: draws a fixed amount of samples, rather than using the length of
the color cycle, because different styles may have different numbers
of colors.
"""
for sty_dict, j in zip(plt.rcParams["axes.prop_cycle"], range(nb_samples)):
ax.add_patch(
plt.Circle(
prng.normal(scale=3, size=2), radius=1.0, color=sty_dict["color"]
)
)
# Force the limits to be the same across the styles (because different
# styles may have different numbers of available colors).
ax.set_xlim([-4, 8])
ax.set_ylim([-5, 6])
ax.set_aspect("equal", adjustable="box") # to plot circles as circles
return ax
def plot_image_and_patch(ax, prng, size=(20, 20)):
"""Plot an image with random values and superimpose a circular patch."""
values = prng.random_sample(size=size)
ax.imshow(values, interpolation="none")
c = plt.Circle((5, 5), radius=5, label="patch")
ax.add_patch(c)
# Remove ticks
ax.set_xticks([])
ax.set_yticks([])
def plot_histograms(ax, prng, nb_samples=10000):
"""Plot 4 histograms and a text annotation."""
params = ((10, 10), (4, 12), (50, 12), (6, 55))
for a, b in params:
values = prng.beta(a, b, size=nb_samples)
ax.hist(values, histtype="stepfilled", bins=30, alpha=0.8, density=True)
# Add a small annotation.
ax.annotate(
"Annotation",
xy=(0.25, 4.25),
xytext=(0.9, 0.9),
textcoords=ax.transAxes,
va="top",
ha="right",
bbox=dict(boxstyle="round", alpha=0.2),
arrowprops=dict(
arrowstyle="->", connectionstyle="angle,angleA=-95,angleB=35,rad=10"
),
)
return ax
def plot_figure(style_label):
"""Setup and plot the demonstration figure with a given style."""
# Use a dedicated RandomState instance to draw the same "random" values
# across the different figures.
prng = np.random.RandomState(96917002)
    # Tweak the figure size to better suit a row of several plots: widen the figure
    # and reduce its height. NB: use relative changes because some styles may have a
    # figure size different from the default one.
(fig_width, fig_height) = plt.rcParams["figure.figsize"]
fig_size = [fig_width * 4 / 1.5, fig_height / 1.5]
fig, axs = plt.subplots(
ncols=5, nrows=1, num=style_label, figsize=fig_size, squeeze=True
)
fig.suptitle(style_label)
plot_scatter(axs[0], prng)
plot_bar_graphs(axs[1], prng)
plot_colored_circles(axs[2], prng)
plot_colored_sinusoidal_lines(axs[3])
plot_histograms(axs[4], prng)
fig.tight_layout()
return fig
def _normalize(string):
string = re.sub("[^0-9a-zA-Z ]+", "", string)
return string.replace(" ", "-").lower()
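# e.g. _normalize("Aura (dark soft)") -> "aura-dark-soft"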
if __name__ == "__main__":
import matplotx
schemes = {
"Aura (dark)": matplotx.styles.aura["dark"],
"Aura (dark soft)": matplotx.styles.aura["dark-soft"],
"ayu (dark)": matplotx.styles.ayu["dark"],
"ayu (light)": matplotx.styles.ayu["light"],
"ayu (mirage)": matplotx.styles.ayu["mirage"],
"Challenger Deep": matplotx.styles.challenger_deep,
"Pitaya Smoothie (dark)": matplotx.styles.pitaya_smoothie["dark"],
"Pitaya Smoothie (light)": matplotx.styles.pitaya_smoothie["light"],
"Pacoty": matplotx.styles.pacoty,
"Dracula": matplotx.styles.dracula,
"GitHub (dark)": matplotx.styles.github["dark"],
"GitHub (dimmed)": matplotx.styles.github["dimmed"],
"GitHub (light)": matplotx.styles.github["light"],
"gruvbox (dark)": matplotx.styles.gruvbox["dark"],
"gruvbox (light)": matplotx.styles.gruvbox["light"],
"Nord": matplotx.styles.nord,
"One Dark": matplotx.styles.onedark,
"Solarized (dark)": matplotx.styles.solarized["dark"],
"Solarized (light)": matplotx.styles.solarized["light"],
"Tableau 10": matplotx.styles.tab10,
"Tableau 20": matplotx.styles.tab20,
"Tokyo Night (storm)": matplotx.styles.tokyo_night["storm"],
"Tokyo Night (night)": matplotx.styles.tokyo_night["night"],
"Tokyo Night (day)": matplotx.styles.tokyo_night["day"],
}
for name, scheme in schemes.items():
with plt.style.context(scheme):
fig = plot_figure(name)
# plt.savefig("dracula.svg", bbox_inches="tight")
plt.savefig(f"{_normalize(name)}.svg")
# plt.show()
plt.close()
|
the-stack_106_17825
|
# -*- coding: utf-8 -*-
import pytest
from pandas import Categorical
from pandas.util.testing import assert_categorical_equal
@pytest.mark.parametrize("c", [
Categorical([1, 2, 3, 4]),
Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4, 5]),
])
def test_categorical_equal(c):
assert_categorical_equal(c, c)
@pytest.mark.parametrize("check_category_order", [True, False])
def test_categorical_equal_order_mismatch(check_category_order):
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([1, 2, 3, 4], categories=[4, 3, 2, 1])
kwargs = dict(check_category_order=check_category_order)
if check_category_order:
msg = """Categorical\\.categories are different
Categorical\\.categories values are different \\(100\\.0 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[4, 3, 2, 1\\], dtype='int64'\\)"""
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2, **kwargs)
else:
assert_categorical_equal(c1, c2, **kwargs)
def test_categorical_equal_categories_mismatch():
msg = """Categorical\\.categories are different
Categorical\\.categories values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 5\\], dtype='int64'\\)"""
c1 = Categorical([1, 2, 3, 4])
c2 = Categorical([1, 2, 3, 5])
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2)
def test_categorical_equal_codes_mismatch():
categories = [1, 2, 3, 4]
msg = """Categorical\\.codes are different
Categorical\\.codes values are different \\(50\\.0 %\\)
\\[left\\]: \\[0, 1, 3, 2\\]
\\[right\\]: \\[0, 1, 2, 3\\]"""
c1 = Categorical([1, 2, 4, 3], categories=categories)
c2 = Categorical([1, 2, 3, 4], categories=categories)
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2)
def test_categorical_equal_ordered_mismatch():
data = [1, 2, 3, 4]
msg = """Categorical are different
Attribute "ordered" are different
\\[left\\]: False
\\[right\\]: True"""
c1 = Categorical(data, ordered=False)
c2 = Categorical(data, ordered=True)
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2)
@pytest.mark.parametrize("obj", ["index", "foo", "pandas"])
def test_categorical_equal_object_override(obj):
data = [1, 2, 3, 4]
msg = """{obj} are different
Attribute "ordered" are different
\\[left\\]: False
\\[right\\]: True""".format(obj=obj)
c1 = Categorical(data, ordered=False)
c2 = Categorical(data, ordered=True)
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2, obj=obj)
|
the-stack_106_17826
|
from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
symbols = frozenset([pc, x, y])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = n_locs
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
# pc = 0 & (x >= 0) -> pc' = 1
cond = mgr.GE(x, ints[0])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x >= 0) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
# pc = 1 -> pc' = 2
cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same_x = mgr.Equals(x_x, x)
same_y = mgr.Equals(x_y, y)
same = mgr.And(same_x, same_y)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> x' = x + y & same_y
trans.append(mgr.Implies(pcs[1],
mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
same_y)))
# pc = 2 -> same_x & y' = y + 1
trans.append(mgr.Implies(pcs[2],
mgr.And(same_x,
mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
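# The constraints above encode, roughly, the following counter program, where the
# fairness condition pc != -1 asks for a non-terminating run:
#
#   0: while x >= 0:
#   1:     x = x + y
#   2:     y = y + 1
#  -1: end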
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
symbs = frozenset([pc, x, y])
m_100 = mgr.Int(-100)
m_1 = mgr.Int(-1)
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_4 = mgr.Int(4)
i_20 = mgr.Int(20)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
res = []
loc0 = Location(env, mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, x), i_1)))
loc1 = Location(env, mgr.GE(x, i_20))
loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
h_x = Hint("h_x6", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_20), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x0", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100))
loc0.set_progress(0, mgr.Equals(x_y, mgr.Times(y, y)))
h_y = Hint("h_y5", env, frozenset([y]), symbs)
h_y.set_locs([loc0])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GT(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, y), i_1)))
loc1 = Location(env, mgr.GE(x, i_2))
loc1.set_progress(2, mgr.LT(x_x, mgr.Times(m_1, x, x)))
loc2 = Location(env, mgr.LE(x, i_4))
loc2.set_progress(0, mgr.GE(x_x, mgr.Div(x, x)))
h_x = Hint("h_x7", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(2, mgr.GE(x_y, i_20))
loc2 = Location(env, mgr.TRUE())
loc2.set_progress(0, mgr.And(mgr.GE(x_y, m_100), mgr.LE(x_y, i_0)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.GT(x_x, y))
loc2 = Location(env, mgr.GE(x, i_2))
loc2.set_progress(0, mgr.GE(x_x, i_20))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.LE(x, i_0))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, x)))
loc1 = Location(env, mgr.GE(x, i_0))
loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
h_x = Hint("h_x5", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(x, y)))
loc1 = Location(env, mgr.GE(x, i_2), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.GT(x_pc, pc))
loc1 = Location(env, mgr.GE(pc, i_2))
loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
h_pc = Hint("h_pc2", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
return frozenset(res)
|
the-stack_106_17827
|
# Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import logging
from schema import Schema, And, Optional, Or
from appimagebuilder.recipe.roamer import Roamer
class RecipeSchema:
def __init__(self):
self.version = Schema(int, ignore_extra_keys=True)
self.script = Schema(Or(str, [str]))
self.v1_app_info = {
"id": str,
Optional("name"): str,
Optional("icon"): str,
"version": str,
"exec": str,
Optional("exec_args"): str,
}
self.v1_files = {
Optional("include"): [str],
Optional("exclude"): [str],
}
self.v1_runtime = {
Optional("debug"): bool,
Optional("version"): str,
Optional("path_mappings"): [str],
Optional("arch"): [Or("gnueabihf", "x86_64", "i386", "aarch64")],
Optional("env"): {str: Or(str, int, bool)},
Optional("preserve"): [str],
}
self.v1_tests = {
str: {
"image": str,
"command": str,
Optional("before_command"): self.script,
Optional("use_host_x"): bool,
Optional("env"): {str: Or(str, int, bool)},
}
}
self.v1_apt = Schema(
{
"arch": Or(str, [str]),
"sources": [{"sourceline": str, Optional("key_url"): str}],
"include": [str],
Optional("exclude"): [str],
Optional("allow_unauthenticated"): bool,
}
)
self.v1_pacman = Schema(
{
Optional("Architecture"): Or("auto", "x86_64", "i686", "aarch64"),
Optional("repositories"): {str: [str]},
Optional("keyrings"): [str],
Optional("options"): {str: str},
"include": [str],
Optional("exclude"): [str],
}
)
self.v1_appdir = Schema(
{
Optional("path"): str,
"app_info": self.v1_app_info,
Optional("files"): self.v1_files,
Optional("apt"): self.v1_apt,
Optional("pacman"): self.v1_pacman,
Optional("runtime"): self.v1_runtime,
Optional("test"): self.v1_tests,
Optional("before_bundle"): self.script,
Optional("after_bundle"): self.script,
Optional("before_runtime"): self.script,
Optional("after_runtime"): self.script,
}
)
self.v1_appimage = Schema(
{
"arch": str,
Optional("update-information"): str,
Optional("sign-key"): str,
Optional("file_name"): str,
}
)
self.v1 = Schema(
{
"version": int,
Optional("script"): self.script,
"AppDir": self.v1_appdir,
Optional("AppImage"): self.v1_appimage,
}
)
def validate(self, recipe: Roamer):
if recipe.version() == 1:
return self.v1.validate(recipe(resolve_variables=False))
else:
logging.error("Unknown recipe version: %s" % recipe.version())
logging.info(
"Please make sure you're using the latest appimage-builder version"
)
exit(1)
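# A minimal recipe sketch that the v1 schema above would accept (illustrative values only):
#
#   version: 1
#   AppDir:
#     app_info:
#       id: org.example.app
#       version: "1.0.0"
#       exec: usr/bin/example
#   AppImage:
#     arch: x86_64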
|
the-stack_106_17828
|
import json
from newrelic_rest_api import NewRelicRestAPI
class NewRelicAccount():
def __init__(self, rest_api_key=''):
self.__rest_api = NewRelicRestAPI(rest_api_key)
self.__cache = []
def __get_cache(self, set_name):
L = list(filter(lambda set: set['set_name'] == set_name, self.__cache))
if len(L) == 1:
return L[0]['data'], True
else:
return [], False
def users():
doc = "The users dictionary."
def fget(self):
result, ok = self.__get_cache('users')
if not ok:
result, ok = self.__rest_api.get_set('users')
if ok:
result = [{'id':item['id'], 'data':item} for item in result]
self.__cache.append({
'set_name': 'users',
'data': result,
})
return result, ok
return locals()
users = property(**users())
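    # property(**users()) expands the dict returned by users() (its locals(): here just
    # 'doc' and 'fget') into keyword arguments of property(), yielding a read-only
    # property; the same pattern is repeated for the properties below.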
def apm_applications():
doc = "The applications dictionary."
def fget(self):
result, ok = self.__get_cache('applications')
if not ok:
result, ok = self.__rest_api.get_set('applications')
if ok:
result = [{'id':item['id'], 'data':item} for item in result]
self.__cache.append({
'set_name': 'applications',
'data': result,
})
return result, ok
return locals()
apm_applications = property(**apm_applications())
def mobile_applications():
doc = "The mobile applications dictionary."
def fget(self):
result, ok = self.__get_cache('mobile_applications')
if not ok:
result, ok = self.__rest_api.get_set('mobile_applications')
if ok:
result = [{'id':item['id'], 'data':item} for item in result]
self.__cache.append({
'set_name': 'mobile_applications',
'data': result,
})
return result, ok
return locals()
mobile_applications = property(**mobile_applications())
def browser_applications():
doc = "The browser applications dictionary."
def fget(self):
result, ok = self.__get_cache('browser_applications')
if not ok:
result, ok = self.__rest_api.get_set('browser_applications')
if ok:
result = [{'id':item['id'], 'data':item} for item in result]
self.__cache.append({
'set_name': 'browser_applications',
'data': result,
})
return result, ok
return locals()
browser_applications = property(**browser_applications())
def alerts_policies():
doc = "The alerts policies dictionary."
def fget(self):
result, ok = self.__get_cache('alerts_policies')
if not ok:
result, ok = self.__rest_api.get_set('alerts_policies')
if ok:
result = [{'id':item['id'], 'data':item} for item in result]
self.__cache.append({
'set_name': 'alerts_policies',
'data': result,
})
return result, ok
return locals()
alerts_policies = property(**alerts_policies())
if __name__ == "__main__":
account = NewRelicAccount()
    result, ok = account.apm_applications
if ok:
print(json.dumps(result, sort_keys=True, indent=4))
|
the-stack_106_17830
|
import threading
import collections
import random
import time
BUFFER_SIZE = 10
REPONEDORES = random.randint(0,2)
CLIENTES = random.randint(3,6)
nombres_c = []
# MONITOR DECLARATION
class maquina(object):
def __init__(self, arg):
self.nRep = REPONEDORES
self.nCli = CLIENTES
self.replenishing = False
self.nSodas = 0
self.mutex = threading.Lock()
self.canConsume = threading.Condition(self.mutex)
self.canReplenish = threading.Condition(self.mutex)
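        # canConsume: consumers wait here while the machine is empty or being refilled
        # canReplenish: the replenisher waits here while the machine is full and clients remain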
    # OPERATIONS
def consumir(self, name, consumiciones):
with self.mutex:
cons_done = 0
while cons_done != consumiciones:
if (self.nRep > 0):
while self.replenishing or self.nSodas == 0:
self.canConsume.wait()
self.nSodas = self.nSodas - 1
self.canConsume.notify()
time.sleep(random.randint(1,2))
cons_done = cons_done + 1
print (" ",name, "coge un refresco - consumición: ", (cons_done))
self.canReplenish.notify()
else:
consumiciones = 0
print(name, "Aquí nadie repone la máquina!")
self.nCli = self.nCli - 1
print("--->" + name + " se'n va, queden " + str(self.nCli) + " clients")
def rellenarMaquina(self, id):
with self.mutex:
while self.nCli > 0:
while self.nSodas == BUFFER_SIZE and self.nCli > 0:
self.canReplenish.wait()
self.replenishing = True
counter = 0
while self.nSodas != BUFFER_SIZE:
self.nSodas = self.nSodas + 1
counter = counter + 1
time.sleep(random.randint(2,4))
if counter > 0:
print("Reposador", id[7:9], " reposa la màquina, hi ha ", str((self.nSodas-counter)), " i en posa ", str(counter))
self.replenishing = False
self.canConsume.notify()
print("Reposador", id[7:9], "se'n va")
    # GREETINGS
def r_saluda(self, id):
with self.mutex:
print ("Reposador", id[7:9], "arriba")
def c_saluda(self, name, consumiciones):
with self.mutex:
print (" ", name, "arriba i farà", str(consumiciones), "consumicions")
# PARTICIPATING PROCESSES
def clientes(monitor, name):  # consumers
consumiciones = random.randint(0,6)
monitor.c_saluda(name, consumiciones)
monitor.consumir(name, consumiciones)
def reponedores(monitor):  # producers
id = threading.current_thread().name
monitor.r_saluda(id)
monitor.rellenarMaquina(id)
# MAIN METHOD
def main():
threads = []
monitor = maquina(BUFFER_SIZE)
n = 0
    # Print a recap of the machine's clients and replenishers
print ("COMENÇA LA SIMULACIÓ")
print ("Avui hi ha ",str(CLIENTES), "clientes i", str(REPONEDORES), "reposadors" )
print ("La màquina de refrescs està buida, hi caben ", str(BUFFER_SIZE), "refrescs")
for i in range(CLIENTES):
num = random.randint(1, 250)
file = open(r"/content/drive/My Drive/LlistaNoms.txt", "r", encoding="utf8")
lines = file.readlines()
name = lines[num]
name = name.rstrip('\n')
nombres_c.append(name)
file.close()
for i in range(CLIENTES):
c = threading.Thread(target=clientes, args=(monitor, str(nombres_c[n]),))
n = n + 1
threads.append(c)
for i in range(REPONEDORES):
p = threading.Thread(target=reponedores, args=(monitor,))
threads.append(p)
    # Start all the threads
for t in threads:
t.start()
    # Wait for all the threads to finish
for t in threads:
t.join()
if __name__ == "__main__":
main()
|
the-stack_106_17832
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests to ensure that the training loop works with a dict (1.0)
"""
import collections
import itertools
import os
from unittest import mock
import numpy as np
import pytest
import torch
from torch.utils.data import Dataset
import pytorch_lightning as pl
from pytorch_lightning import callbacks, Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.core.lightning import LightningModule
from tests.helpers.boring_model import BoringModel, RandomDictDataset, RandomDictStringDataset
from tests.helpers.deterministic_model import DeterministicModel
from tests.helpers.runif import RunIf
@mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
def test__training_step__log(tmpdir):
"""
    Tests logging from training_step only
"""
class TestModel(DeterministicModel):
def training_step(self, batch, batch_idx):
acc = self.step(batch, batch_idx)
acc = acc + batch_idx
# -----------
# default
# -----------
self.log('default', acc)
# -----------
# logger
# -----------
# on_step T on_epoch F
self.log('l_s', acc, on_step=True, on_epoch=False, prog_bar=False, logger=True)
# on_step F on_epoch T
self.log('l_e', acc, on_step=False, on_epoch=True, prog_bar=False, logger=True)
# on_step T on_epoch T
self.log('l_se', acc, on_step=True, on_epoch=True, prog_bar=False, logger=True)
# -----------
# pbar
# -----------
# on_step T on_epoch F
self.log('p_s', acc, on_step=True, on_epoch=False, prog_bar=True, logger=False)
# on_step F on_epoch T
self.log('p_e', acc, on_step=False, on_epoch=True, prog_bar=True, logger=False)
# on_step T on_epoch T
self.log('p_se', acc, on_step=True, on_epoch=True, prog_bar=True, logger=False)
self.training_step_called = True
return acc
def backward(self, loss, optimizer, optimizer_idx):
return LightningModule.backward(self, loss, optimizer, optimizer_idx)
model = TestModel()
model.val_dataloader = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
log_every_n_steps=1,
weights_summary=None,
callbacks=[ModelCheckpoint(monitor='l_se')],
)
trainer.fit(model)
# make sure correct steps were called
assert model.training_step_called
assert not model.training_step_end_called
assert not model.training_epoch_end_called
# make sure all the metrics are available for callbacks
logged_metrics = set(trainer.logged_metrics.keys())
expected_logged_metrics = {
'epoch',
'default',
'l_e',
'l_s',
'l_se_step',
'l_se_epoch',
}
assert logged_metrics == expected_logged_metrics
pbar_metrics = set(trainer.progress_bar_metrics.keys())
expected_pbar_metrics = {
'p_e',
'p_s',
'p_se_step',
'p_se_epoch',
}
assert pbar_metrics == expected_pbar_metrics
callback_metrics = set(trainer.callback_metrics.keys())
callback_metrics.remove('debug_epoch')
expected_callback_metrics = set()
expected_callback_metrics = expected_callback_metrics.union(logged_metrics)
expected_callback_metrics = expected_callback_metrics.union(pbar_metrics)
expected_callback_metrics.update({'p_se', 'l_se'})
expected_callback_metrics.remove('epoch')
assert callback_metrics == expected_callback_metrics
@mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
def test__training_step__epoch_end__log(tmpdir):
"""
    Tests logging from training_step together with training_epoch_end
"""
class TestModel(DeterministicModel):
def training_step(self, batch, batch_idx):
self.training_step_called = True
acc = self.step(batch, batch_idx)
acc = acc + batch_idx
self.log('a', acc, on_step=True, on_epoch=True)
self.log_dict({'a1': acc, 'a2': acc})
return acc
def training_epoch_end(self, outputs):
self.training_epoch_end_called = True
self.log('b1', outputs[0]['loss'])
self.log('b', outputs[0]['loss'], on_epoch=True, prog_bar=True, logger=True)
def backward(self, loss, optimizer, optimizer_idx):
return LightningModule.backward(self, loss, optimizer, optimizer_idx)
model = TestModel()
model.val_dataloader = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
log_every_n_steps=1,
weights_summary=None,
)
trainer.fit(model)
# make sure correct steps were called
assert model.training_step_called
assert not model.training_step_end_called
assert model.training_epoch_end_called
# make sure all the metrics are available for callbacks
logged_metrics = set(trainer.logged_metrics.keys())
expected_logged_metrics = {'epoch', 'a_step', 'a_epoch', 'b', 'b1', 'a1', 'a2'}
assert logged_metrics == expected_logged_metrics
pbar_metrics = set(trainer.progress_bar_metrics.keys())
expected_pbar_metrics = {'b'}
assert pbar_metrics == expected_pbar_metrics
callback_metrics = set(trainer.callback_metrics.keys())
callback_metrics.remove('debug_epoch')
expected_callback_metrics = set()
expected_callback_metrics = expected_callback_metrics.union(logged_metrics)
expected_callback_metrics = expected_callback_metrics.union(pbar_metrics)
expected_callback_metrics.remove('epoch')
expected_callback_metrics.add('a')
assert callback_metrics == expected_callback_metrics
@mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
@pytest.mark.parametrize(['batches', 'log_interval', 'max_epochs'], [(1, 1, 1), (64, 32, 2)])
def test__training_step__step_end__epoch_end__log(tmpdir, batches, log_interval, max_epochs):
"""
    Tests logging from training_step, training_step_end and training_epoch_end
"""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
self.training_step_called = True
loss = self.step(batch[0])
self.log('a', loss, on_step=True, on_epoch=True)
return loss
def training_step_end(self, out):
self.training_step_end_called = True
self.log('b', out, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return out
def training_epoch_end(self, outputs):
self.training_epoch_end_called = True
self.log('c', outputs[0]['loss'], on_epoch=True, prog_bar=True, logger=True)
self.log('d/e/f', 2)
model = TestModel()
model.val_dataloader = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=batches,
limit_val_batches=batches,
max_epochs=max_epochs,
log_every_n_steps=log_interval,
weights_summary=None,
)
trainer.fit(model)
# make sure correct steps were called
assert model.training_step_called
assert model.training_step_end_called
assert model.training_epoch_end_called
# make sure all the metrics are available for callbacks
logged_metrics = set(trainer.logged_metrics.keys())
expected_logged_metrics = {'a_step', 'a_epoch', 'b_step', 'b_epoch', 'c', 'd/e/f', 'epoch'}
assert logged_metrics == expected_logged_metrics
pbar_metrics = set(trainer.progress_bar_metrics.keys())
expected_pbar_metrics = {'c', 'b_epoch', 'b_step'}
assert pbar_metrics == expected_pbar_metrics
callback_metrics = set(trainer.callback_metrics.keys())
callback_metrics.remove('debug_epoch')
expected_callback_metrics = set()
expected_callback_metrics = expected_callback_metrics.union(logged_metrics)
expected_callback_metrics = expected_callback_metrics.union(pbar_metrics)
expected_callback_metrics.update({'a', 'b'})
expected_callback_metrics.remove('epoch')
assert callback_metrics == expected_callback_metrics
# assert the loggers received the expected number
assert len(trainer.dev_debugger.logged_metrics) == ((batches / log_interval) * max_epochs) + max_epochs
@pytest.mark.parametrize(['batches', 'fx', 'result'], [(1, min, 0), (2, max, 1), (11, max, 10)])
def test__training_step__log_max_reduce_fx(tmpdir, batches, fx, result):
"""
Tests that log works correctly with different tensor types
"""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
acc = self.step(batch[0])
self.log('foo', torch.tensor(batch_idx).long(), on_step=False, on_epoch=True, reduce_fx=fx)
return acc
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log('bar', torch.tensor(batch_idx).float(), on_step=False, on_epoch=True, reduce_fx=fx)
return {"x": loss}
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=batches,
limit_val_batches=batches,
max_epochs=2,
weights_summary=None,
)
trainer.fit(model)
# make sure types are correct
assert trainer.logged_metrics['foo'] == result
assert trainer.logged_metrics['bar'] == result
def test_tbptt_log(tmpdir):
"""
    Tests logging with truncated back-propagation through time (tbptt)
"""
truncated_bptt_steps = 2
sequence_size = 30
batch_size = 30
x_seq = torch.rand(batch_size, sequence_size, 1)
y_seq_list = torch.rand(batch_size, sequence_size, 1).tolist()
class MockSeq2SeqDataset(torch.utils.data.Dataset):
def __getitem__(self, i):
return x_seq, y_seq_list
def __len__(self):
return 1
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.test_hidden = None
self.layer = torch.nn.Linear(2, 2)
def training_step(self, batch, batch_idx, hiddens):
assert hiddens == self.test_hidden, "Hidden state not persistent between tbptt steps"
self.test_hidden = torch.rand(1)
x_tensor, y_list = batch
assert x_tensor.shape[1] == truncated_bptt_steps, "tbptt split Tensor failed"
y_tensor = torch.tensor(y_list, dtype=x_tensor.dtype)
assert y_tensor.shape[1] == truncated_bptt_steps, "tbptt split list failed"
pred = self(x_tensor.view(batch_size, truncated_bptt_steps))
loss = torch.nn.functional.mse_loss(pred, y_tensor.view(batch_size, truncated_bptt_steps))
self.log('a', loss, on_epoch=True)
return {'loss': loss, 'hiddens': self.test_hidden}
def on_train_epoch_start(self) -> None:
self.test_hidden = None
def train_dataloader(self):
return torch.utils.data.DataLoader(
dataset=MockSeq2SeqDataset(),
batch_size=batch_size,
shuffle=False,
sampler=None,
)
model = TestModel()
model.training_epoch_end = None
model.example_input_array = torch.randn(5, truncated_bptt_steps)
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=10,
limit_val_batches=0,
truncated_bptt_steps=truncated_bptt_steps,
max_epochs=2,
log_every_n_steps=2,
weights_summary=None,
)
trainer.fit(model)
generated = set(trainer.logged_metrics.keys())
expected = {'a_step', 'a_epoch', 'epoch'}
assert generated == expected
def test_different_batch_types_for_sizing(tmpdir):
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
assert isinstance(batch, dict)
a = batch['a']
acc = self.step(a)
self.log('a', {'d1': 2, 'd2': torch.tensor(1)}, on_step=True, on_epoch=True)
return acc
def validation_step(self, batch, batch_idx):
assert isinstance(batch, dict)
a = batch['a']
output = self.layer(a)
loss = self.loss(batch, output)
self.log('n', {'d3': 2, 'd4': torch.tensor(1)}, on_step=True, on_epoch=True)
return {"x": loss}
def train_dataloader(self):
return torch.utils.data.DataLoader(RandomDictDataset(32, 64), batch_size=32)
def val_dataloader(self):
return torch.utils.data.DataLoader(RandomDictDataset(32, 64), batch_size=32)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=2,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
generated = set(trainer.logger_connector.logged_metrics)
expected = {'a_step', 'a_epoch', 'n_step/epoch_0', 'n_epoch', 'epoch'}
assert generated == expected
def test_validation_step_with_string_data_logging(tmpdir):
class TestModel(BoringModel):
def on_train_epoch_start(self) -> None:
print("override any method to prove your bug")
def training_step(self, batch, batch_idx):
output = self.layer(batch["x"])
loss = self.loss(batch, output)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
output = self.layer(batch["x"])
loss = self.loss(batch, output)
self.log("x", loss)
return {"x": loss}
# fake data
train_data = torch.utils.data.DataLoader(RandomDictStringDataset(32, 64))
val_data = torch.utils.data.DataLoader(RandomDictStringDataset(32, 64))
# model
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model, train_data, val_data)
def test_nested_datasouce_batch(tmpdir):
class NestedDictStringDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
x = {
'post_text': ['bird is fast', 'big cat'],
'dense_0': [
torch.tensor([-0.1000, 0.2000], dtype=torch.float64),
torch.tensor([1, 1], dtype=torch.uint8),
],
'post_id': ['115', '116'],
'label': [torch.tensor([0, 1]), torch.tensor([1, 1], dtype=torch.uint8)]
}
return x
def __len__(self):
return self.len
class TestModel(BoringModel):
def on_train_epoch_start(self) -> None:
print("override any method to prove your bug")
def training_step(self, batch, batch_idx):
output = self.layer(torch.rand(32))
loss = self.loss(batch, output)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
output = self.layer(torch.rand(32))
loss = self.loss(batch, output)
self.log("x", loss)
return {"x": loss}
# fake data
train_data = torch.utils.data.DataLoader(NestedDictStringDataset(32, 64))
val_data = torch.utils.data.DataLoader(NestedDictStringDataset(32, 64))
# model
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model, train_data, val_data)
@mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
def test_log_works_in_train_callback(tmpdir):
"""
Tests that log can be called within callback
"""
class TestCallback(callbacks.Callback):
# helpers
count = 1
choices = [False, True]
# used to compute expected values
callback_funcs_called = collections.defaultdict(list)
funcs_called_count = collections.defaultdict(int)
funcs_attr = {}
def make_logging(
self, pl_module: pl.LightningModule, func_name, func_idx, on_steps=[], on_epochs=[], prob_bars=[]
):
self.funcs_called_count[func_name] += 1
iterate = list(itertools.product(*[on_steps, on_epochs, prob_bars]))
for idx, (on_step, on_epoch, prog_bar) in enumerate(iterate):
# run logging
custom_func_name = f"{func_idx}_{idx}_{func_name}"
pl_module.log(
custom_func_name, self.count * func_idx, on_step=on_step, on_epoch=on_epoch, prog_bar=prog_bar
)
# catch information for verification
                # on_train_start runs outside the main loop, so it won't be called again; record it now
if func_name == "on_train_start":
self.callback_funcs_called[func_name].append([self.count * func_idx])
                # Save only values from the second epoch, so we can compute their mean or latest value.
if pl_module.trainer.current_epoch == 1:
self.callback_funcs_called[func_name].append([self.count * func_idx])
forked = on_step and on_epoch
self.funcs_attr[custom_func_name] = {
"on_step": on_step,
"on_epoch": on_epoch,
"prog_bar": prog_bar,
"forked": forked,
"func_name": func_name
}
if on_step and on_epoch:
self.funcs_attr[f"{custom_func_name}_step"] = {
"on_step": True,
"on_epoch": False,
"prog_bar": prog_bar,
"forked": False,
"func_name": func_name
}
self.funcs_attr[f"{custom_func_name}_epoch"] = {
"on_step": False,
"on_epoch": True,
"prog_bar": prog_bar,
"forked": False,
"func_name": func_name
}
def on_train_start(self, trainer, pl_module):
self.make_logging(
pl_module, 'on_train_start', 1, on_steps=self.choices, on_epochs=self.choices, prob_bars=self.choices
)
def on_epoch_start(self, trainer, pl_module):
self.make_logging(
pl_module, 'on_epoch_start', 2, on_steps=self.choices, on_epochs=self.choices, prob_bars=self.choices
)
def on_train_epoch_start(self, trainer, pl_module):
self.make_logging(
pl_module,
'on_train_epoch_start',
3,
on_steps=self.choices,
on_epochs=self.choices,
prob_bars=self.choices
)
def on_batch_end(self, trainer, pl_module):
self.make_logging(
pl_module, 'on_batch_end', 6, on_steps=self.choices, on_epochs=self.choices, prob_bars=self.choices
)
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
self.make_logging(
pl_module,
'on_train_batch_end',
7,
on_steps=self.choices,
on_epochs=self.choices,
prob_bars=self.choices
)
            # used to make sure aggregation works fine.
            # we should obtain func([value * c for c in range(1, max_epochs * limit_train_batches)])
            # with func = np.mean if on_epoch else func = np.max
self.count += 1
def on_train_epoch_end(self, trainer, pl_module, outputs):
self.make_logging(
pl_module, 'on_train_epoch_end', 8, on_steps=[False], on_epochs=self.choices, prob_bars=self.choices
)
def on_epoch_end(self, trainer, pl_module):
self.make_logging(
pl_module, 'on_epoch_end', 9, on_steps=[False], on_epochs=self.choices, prob_bars=self.choices
)
class TestModel(BoringModel):
manual_loss = []
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.manual_loss.append(loss)
self.log('train_loss', loss)
return {"loss": loss}
max_epochs = 2
limit_train_batches = 2
model = TestModel()
test_callback = TestCallback()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=limit_train_batches,
limit_val_batches=0,
limit_test_batches=0,
val_check_interval=0.,
num_sanity_val_steps=0,
max_epochs=max_epochs,
callbacks=[test_callback]
)
trainer.fit(model)
assert test_callback.funcs_called_count["on_train_start"] == 1
assert test_callback.funcs_called_count["on_epoch_start"] == 2
assert test_callback.funcs_called_count["on_train_epoch_start"] == 2
assert test_callback.funcs_called_count["on_batch_end"] == 4
assert test_callback.funcs_called_count["on_epoch_end"] == 2
assert test_callback.funcs_called_count["on_train_batch_end"] == 4
assert test_callback.funcs_called_count["on_epoch_end"] == 2
assert test_callback.funcs_called_count["on_train_epoch_end"] == 2
# Make sure the func_name exists within callback_metrics. If not, we missed some
callback_metrics_keys = [*trainer.callback_metrics.keys()]
for func_name in test_callback.callback_funcs_called.keys():
is_in = False
for callback_metrics_key in callback_metrics_keys:
if func_name in callback_metrics_key:
is_in = True
assert is_in, (func_name, callback_metrics_keys)
# function used to describe expected return logic
def get_expected_output(func_attr, original_values):
if func_attr["on_epoch"] and not func_attr["on_step"]:
# Apply mean on values
expected_output = np.mean(original_values)
else:
# Keep the latest value
expected_output = np.max(original_values)
return expected_output
# Make sure the func_name output equals the average from all logged values when on_epoch true
# pop extra keys
trainer.callback_metrics.pop("debug_epoch")
assert trainer.logged_metrics["train_loss"] == model.manual_loss[-1]
assert trainer.callback_metrics["train_loss"] == model.manual_loss[-1]
trainer.callback_metrics.pop("train_loss")
for func_name, output_value in trainer.callback_metrics.items():
if torch.is_tensor(output_value):
output_value = output_value.item()
# get creation attr
func_attr = test_callback.funcs_attr[func_name]
        # retrieve the original logged values
original_values = test_callback.callback_funcs_called[func_attr["func_name"]]
# compute expected output and compare to actual one
expected_output = get_expected_output(func_attr, original_values)
assert float(output_value) == float(expected_output)
for func_name, func_attr in test_callback.funcs_attr.items():
if func_attr["prog_bar"] and (func_attr["on_step"] or func_attr["on_epoch"]) and not func_attr["forked"]:
assert func_name in trainer.logger_connector.progress_bar_metrics
else:
assert func_name not in trainer.logger_connector.progress_bar_metrics
def test_logging_sync_dist_true_cpu(tmpdir):
"""
Tests to ensure that the sync_dist flag works with CPU (should just return the original value)
"""
fake_result = 1
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
acc = self.step(batch[0])
self.log('foo', torch.tensor(fake_result), on_step=False, on_epoch=True, sync_dist=True, sync_dist_op='sum')
self.log('foo_2', 2, on_step=False, on_epoch=True, sync_dist=True, sync_dist_op='sum')
return acc
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log('bar', torch.tensor(fake_result), on_step=False, on_epoch=True, sync_dist=True, sync_dist_op='sum')
return {"x": loss}
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
max_epochs=2,
weights_summary=None,
)
trainer.fit(model)
assert trainer.logged_metrics['foo'] == fake_result
assert trainer.logged_metrics['foo_2'] == 2
assert trainer.logged_metrics['bar'] == fake_result
@RunIf(min_gpus=2, special=True)
def test_logging_sync_dist_true_ddp(tmpdir):
"""
Tests to ensure that the sync_dist flag works with ddp
"""
class TestLoggingSyncDistModel(BoringModel):
def training_step(self, batch, batch_idx):
acc = self.step(batch[0])
self.log('foo', 1, on_step=False, on_epoch=True, sync_dist=True, sync_dist_op='SUM')
self.log('cho', acc, on_step=False, on_epoch=True)
return acc
def validation_step(self, batch, batch_idx):
self.training_step_called = True
output = self.layer(batch)
loss = self.loss(batch, output)
self.log('bar', 2, on_step=False, on_epoch=True, sync_dist=True, sync_dist_op='AVG')
return {"x": loss}
model = TestLoggingSyncDistModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
max_epochs=2,
weights_summary=None,
accelerator="ddp",
gpus=2,
profiler="pytorch"
)
trainer.fit(model)
assert trainer.logged_metrics['foo'] == 2
assert trainer.logged_metrics['bar'] == 2
@RunIf(min_gpus=1)
def test_logging_sync_dist_true_gpu(tmpdir):
"""
Tests to ensure that the sync_dist flag works with GPU (should just return the original value)
"""
fake_result = 1
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
acc = self.step(batch[0])
self.log('foo', torch.tensor(fake_result), on_step=False, on_epoch=True, sync_dist=True, sync_dist_op='sum')
return acc
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log('bar', torch.tensor(fake_result), on_step=False, on_epoch=True, sync_dist=True, sync_dist_op='sum')
return {"x": loss}
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
max_epochs=2,
gpus=1,
weights_summary=None,
)
trainer.fit(model)
assert trainer.logged_metrics['foo'] == fake_result
assert trainer.logged_metrics['bar'] == fake_result
def test_progress_bar_dict_contains_values_on_train_epoch_end(tmpdir):
class TestModel(BoringModel):
def training_step(self, *args):
self.log("foo", torch.tensor(self.current_epoch), on_step=False, on_epoch=True, prog_bar=True)
return super().training_step(*args)
def on_train_epoch_end(self, *_):
self.on_train_epoch_end_called = True
self.epoch_end_called = True
self.log(
'foo_2',
torch.tensor(self.current_epoch),
prog_bar=True,
on_epoch=True,
sync_dist=True,
sync_dist_op='sum'
)
def on_epoch_end(self):
self.epoch_end_called = True
assert self.trainer.progress_bar_dict["foo"] == self.current_epoch
assert self.trainer.progress_bar_dict["foo_2"] == self.current_epoch
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_train_batches=1,
limit_val_batches=0,
checkpoint_callback=False,
logger=False,
weights_summary=None,
progress_bar_refresh_rate=0,
)
model = TestModel()
trainer.fit(model)
assert model.epoch_end_called
assert model.on_train_epoch_end_called
def test_logging_in_callbacks_with_log_function(tmpdir):
"""
Tests ensure self.log can be used directly in callbacks.
"""
class LoggingCallback(callbacks.Callback):
def on_train_start(self, trainer, pl_module):
self.log("on_train_start", 1)
def on_train_epoch_start(self, trainer, pl_module):
self.log("on_train_epoch_start", 2)
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
self.log("on_train_batch_end", 3)
def on_batch_end(self, trainer, pl_module):
self.log("on_batch_end", 4)
def on_epoch_end(self, trainer, pl_module):
self.log("on_epoch_end", 5)
def on_train_epoch_end(self, trainer, pl_module, outputs):
self.log("on_train_epoch_end", 6)
self.callback_metrics = trainer.logger_connector.callback_metrics
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
callbacks=[LoggingCallback()]
)
trainer.fit(model)
expected = {
'on_train_start': 1,
'on_train_epoch_start': 2,
'on_train_batch_end': 3,
'on_batch_end': 4,
'on_epoch_end': 5,
'on_train_epoch_end': 6
}
assert trainer.callback_metrics == expected
@RunIf(min_gpus=1)
def test_metric_are_properly_reduced(tmpdir):
class TestingModel(BoringModel):
def __init__(self, *args, **kwargs) -> None:
super().__init__()
self.val_acc = pl.metrics.Accuracy()
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
self.log("train_loss", output["loss"])
return output
def validation_step(self, batch, batch_idx):
preds = torch.tensor([[0.9, 0.1]], device=self.device)
targets = torch.tensor([1], device=self.device)
if batch_idx < 8:
preds = torch.tensor([[0.1, 0.9]], device=self.device)
self.val_acc(preds, targets)
self.log('val_acc', self.val_acc, on_step=True, on_epoch=True)
return super().validation_step(batch, batch_idx)
early_stop = EarlyStopping(monitor='val_acc', mode='max')
checkpoint = ModelCheckpoint(
monitor='val_acc',
save_last=True,
save_top_k=2,
mode='max',
)
model = TestingModel()
trainer = Trainer(
default_root_dir=tmpdir,
gpus=1,
max_epochs=2,
limit_train_batches=5,
limit_val_batches=32,
callbacks=[early_stop, checkpoint]
)
trainer.fit(model)
assert trainer.callback_metrics["val_acc"] == 8 / 32.
assert "train_loss" in trainer.callback_metrics
|
the-stack_106_17835
|
"""
Data_Reduction.DSN
==================
Subclasses for reducing data taken with DSN-like open loop recorders.
Open-loop recorders are raw IF voltage recorders that are not synchronized with
the communications between the spacecraft and the ground station. As such, they
are the most basic kind of recorder possible in radio astronomy, equivalent to
VLBI recorders. Indeed, an early implementation was known as the "VLBI Science
Recorder" (VSR), followed by later varieties of VSR and eventually, the OSR.
OLR recordings at different stations are indeed combined for VLBI measurements
of spacecraft with respect to distant radio sources, a powerful navigation tool.
Raw IF recordings can be computationally converted into any of the standard
signal types used in radio astronomy -- square-law detected power, spectra,
Stokes parameters, VLBI U-V maps, high time and spectral resolution pulsar data,
*etc.*
"""
import datetime
import glob
import logging
import numpy as NP
import os.path
import time
import Data_Reduction as DR
import Data_Reduction.DSN.OLSR as OLSR
import DatesTimes as DT
logger = logging.getLogger(__name__)
class Observation(DR.Observation):
"""
Class for observations based on open-loop recordings
The arguments for the superclass initialization are ``parent`` (typically
``self`` or ``None``), ``name`` (will be set to a YEAR/DOY default if not
provided), ``dss`` (required), ``date`` (YEAR/DOY required), ``start``,
``end``, and ``project`` (required).
"""
def __init__(self, parent=None, name=None, dss=None,
date=None, start=None, end=None,
project=None):
"""
The project, station, and date are needed to locate the directory for the
working files.
The header item ``STATION_ID`` is not correct in files from Malargue.
The start time is in the header as ``TIME_TAG_YEAR``, ``TIME_TAG_DOY``,
``TIME_TAG_SECOND_OF_DAY`` and ``TIMETAG_PICOSECONDS_OF_THE_SECOND``.
These channel metadata can be extracted from the header: ``SAMPLE_RATE`` for
bandwidth, ``RF_TO_IF_DOWNCONV`` for the band and receiver center frequency,
``IF_TO_CHANNEL_DOWNCONV`` for the channel center frequency.
Args:
parent (Session): (optional) session to which this observation belongs
name (str): (optional) an identifier; default is station ID + "obs"
dss (int): (required) station number
date (str): (required) "YEAR/DOY"
start (float): (optional) UNIX time at the start
end (float): (optional) UNIX time at the end
project (str): (required) directory under /usr/local/projects
"""
mylogger = logging.getLogger(logger.name+".Observation")
DR.Observation.__init__(self, parent=parent, name=name, dss=dss, date=date,
project=project)
self.logger = mylogger
class Map(Observation):
"""
"""
def __init__(self):
"""
"""
pass
class Recording(DR.Recording):
"""
Superclass for DSN recordings based on VSR, WVSR, OLR, etc. data files.
  This also covers data files made with the DSA-3 TTCP recorder.
"""
def __init__(self, session=None, path=None, date=None, dss=None, name=None):
"""
"""
DR.Recording.__init__(self, session=session, path=path, date=date, dss=dss,
name=name)
# --------------------------- module functions ---------------------------
def get_file_metadata(project, dss, year, doy, pattern):
"""
"""
projdatapath, sessionpath, rawdatapath = \
DR.get_obs_dirs(project, dss, year, doy, datafmt=None)
files = glob.glob(sessionpath+pattern)
logger.info("get_file_metadata: files: %s", files)
header = {}
metadata = {}
options = {'toPrintHeaders': False}
for fname in files:
fidx = files.index(fname)
fmt = OLSR.checkFormat(fname)
if fidx:
pass
else:
options.update({'format': fmt})
filename = os.path.basename(fname)
# get the first header
if fmt == "RDEF":
header[filename] = OLSR.readHeaders(fname, options)
elif fmt == "VDR":
header.append(OLSR.VDR(fname, options))
elif fmt == "SFDU":
header.append(OLSR.SFDU(fname, options))
else:
logger.error("get_file_metadata: %s has unknown format %s",
filename, fmt)
# get the metadata
metadata[fidx] = {"file": fname,
"bpS": header[filename]["SAMPLE_SIZE"],
"bw": header[filename]["SAMPLE_RATE"]/1e6,
"f_ofst":(header[filename]['RF_TO_IF_DOWNCONV']-31950000000\
+header[filename]['IF_TO_CHANNEL_DOWNCONV'])/1e6,
"freq": (31950000000+header[filename]['IF_TO_CHANNEL_DOWNCONV'])/1e6,
"unixtime": DT.VSR_tuple_to_timestamp(
header[filename]['TIME_TAG_YEAR'],
header[filename]['TIME_TAG_DOY'],
header[filename]['TIME_TAG_SECOND_OF_DAY']
+header[filename]['TIMETAG_PICOSECONDS_OF_THE_SECOND']//1e12),
"year": header[filename]['TIME_TAG_YEAR'],
"DOY": header[filename]['TIME_TAG_DOY']}
metadata[fidx]['size'] = os.path.getsize(fname)
Bps = header[filename]["SAMPLE_RATE"]*header[filename]["SAMPLE_SIZE"]*2/8
metadata[fidx]['duration'] = metadata[fidx]['size']/Bps
return header, metadata
def print_file_metadata(project, dss, year, doy, pattern):
"""
"""
header, metadata = get_file_metadata(project, dss, year, doy, pattern)
print(27*" "+" Freq."+4*" "+"BW File"+32*" "+"duration")
output = []
for fidx in list(metadata.keys()):
output.append("%24s %7.1f %5.1f %38s %4d"
% (time.ctime(metadata[fidx]['unixtime']),
metadata[fidx]['freq'],
metadata[fidx]['bw'],
os.path.basename(metadata[fidx]['file']),
metadata[fidx]['duration']
))
output.sort()
return output
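# Hypothetical usage sketch (illustrative only; the project name, station, date and
# file pattern below are made up and must match a directory layout that
# DR.get_obs_dirs understands):
#
#   if __name__ == "__main__":
#     for line in print_file_metadata("PESD", 14, 2020, 163, "*.rdef"):
#       print(line)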
|
the-stack_106_17836
|
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import csv
from sklearn import metrics
import sys
log_loc0 = 'logs/TTN/TTN_hid1_it1/'
log_loc1 = 'logs/MERA/MERA_hid1_it1/'
log_loc5 = 'logs/MPS/MPS_hid1_it1/'
log_loc2 = 'logs/CGNN/CGNN_hid1_it1/'
log_loc3 = 'logs/CGNN/CGNN_hid5_it1/'
log_loc4 = 'logs/CGNN/CGNN_hid10_it1/'
log_list0 =[
log_loc5,
log_loc0,
log_loc1,
]
log_list1 =[
log_loc2,
log_loc3,
log_loc4,
]
pdf_location = 'pdf/'
png_location = 'png/'
print('Plots will be saved to:')
print('PDF: ' + pdf_location)
print('PNG: ' + png_location)
def file_length(fname):
    # count the number of lines in a text file (returns 0 for an empty file)
    i = -1
    with open(fname) as f:
        for i, l in enumerate(f):
            pass
    return i + 1
def read_multiple_files(filename, n_runs):
n_items = file_length(filename+'run3/log_validation.csv')-1
accuracy = np.empty(shape=(n_runs,n_items))
auc = np.empty(shape=(n_runs,n_items))
loss = np.empty(shape=(n_runs,n_items))
precision = np.empty(shape=(n_runs,n_items))
log_list = [filename + 'run' + str(i+1) + '/' for i in range(n_runs)]
for i in range(n_runs):
with open(log_list[i]+'log_validation.csv', 'r') as f:
reader = csv.reader(f, delimiter=',')
validation = np.array(list(reader)).astype(float)
accuracy[i,:] = validation[0:n_items,0]
auc[i,:] = validation[0:n_items,1]
loss[i,:] = validation[0:n_items,2]
precision[i,:] = validation[0:n_items,3]
return accuracy, auc, loss, precision
# read last items
q_auc_std = []
q_auc_mean = []
q_loss_std = []
q_loss_mean = []
for log_loc in log_list0:
#if log_loc == log_loc0: n_items=15
#if log_loc == log_loc1: n_items=10
#if log_loc == log_loc5: n_items=19
_, auc, loss, _ = read_multiple_files(log_loc, n_runs=3)
q_auc_mean = np.append(q_auc_mean, np.mean(auc[:,-1],axis=0))
q_auc_std = np.append(q_auc_std, np.std(auc[:,-1],axis=0))
q_loss_mean = np.append(q_loss_mean, np.mean(loss[:,-1],axis=0))
q_loss_std = np.append(q_loss_std, np.std(loss[:,-1],axis=0))
c_auc_std = []
c_auc_mean = []
c_loss_std = []
c_loss_mean = []
for log_loc in log_list1:
_, auc, loss, _ = read_multiple_files(log_loc, n_runs=3)
c_auc_mean = np.append(c_auc_mean, np.mean(auc[:,-1],axis=0))
c_auc_std = np.append(c_auc_std, np.std(auc[:,-1],axis=0))
c_loss_mean = np.append(c_loss_mean, np.mean(loss[:,-1],axis=0))
c_loss_std = np.append(c_loss_std, np.std(loss[:,-1],axis=0))
c_n_params = [30, 266, 831]
q_n_params = [40, 42, 58]
# Plot AUC
plt.clf()
plt.errorbar(c_n_params,c_auc_mean,yerr=c_auc_std,marker="o", c='navy', label='classical')
plt.errorbar(q_n_params,q_auc_mean,yerr=q_auc_std,marker="o",linestyle="None", c='darkorange', label='quantum')
plt.text(q_n_params[0],q_auc_mean[0],s='MPS-hid1')
plt.text(q_n_params[1],q_auc_mean[1],s='TTN-hid1')
plt.text(q_n_params[2],q_auc_mean[2],s='MERA-hid1')
plt.text(c_n_params[0],c_auc_mean[0]+0.01,s='HepTrkX-hid1')
plt.text(c_n_params[1]-100,c_auc_mean[1]-0.015,s='HepTrkX-hid5')
plt.text(c_n_params[2]-400,c_auc_mean[2]-0.01,s='HepTrkX-hid10')
plt.title('AUC Comparison after 1 epoch')
plt.xlabel('# Parameters')
plt.ylabel('AUC')
plt.xscale('log')
plt.legend(loc='lower right')
plt.grid()
plt.tight_layout()
plt.savefig(pdf_location+'validation_comparison.pdf')
plt.savefig(png_location+'validation_comparison.png')
|
the-stack_106_17839
|
from collections import Counter
from itertools import groupby, product
from devito.ir.clusters import Cluster, ClusterGroup, Queue
from devito.ir.support import TILABLE, Scope
from devito.passes.clusters.utils import cluster_pass
from devito.symbolics import pow_to_mul
from devito.tools import DAG, as_tuple, frozendict, timed_pass
from devito.types import Symbol
__all__ = ['Lift', 'fuse', 'optimize_pows', 'extract_increments']
class Lift(Queue):
"""
Remove invariant Dimensions from Clusters to avoid redundant computation.
Notes
-----
This is analogous to the compiler transformation known as
"loop-invariant code motion".
"""
@timed_pass(name='lift')
def process(self, elements):
return super(Lift, self).process(elements)
def callback(self, clusters, prefix):
if not prefix:
# No iteration space to be lifted from
return clusters
hope_invariant = prefix[-1].dim._defines
outer = set().union(*[i.dim._defines for i in prefix[:-1]])
lifted = []
processed = []
for n, c in enumerate(clusters):
# Increments prevent lifting
if c.has_increments:
processed.append(c)
continue
# Is `c` a real candidate -- is there at least one invariant Dimension?
if any(d._defines & hope_invariant for d in c.used_dimensions):
processed.append(c)
continue
impacted = set(processed) | set(clusters[n+1:])
# None of the Functions appearing in a lifted Cluster can be written to
if any(c.functions & set(i.scope.writes) for i in impacted):
processed.append(c)
continue
# All of the inner Dimensions must appear in the write-to region
# otherwise we would violate data dependencies. Consider
#
# 1) 2) 3)
# for i for i for i
# for x for x for x
# r = f(a[x]) for y for y
# r[x] = f(a[x, y]) r[x, y] = f(a[x, y])
#
# In 1) and 2) lifting is infeasible; in 3) the statement can be lifted
# outside the `i` loop as `r`'s write-to region contains both `x` and `y`
xed = {d._defines for d in c.used_dimensions if d not in outer}
if not all(i & set(w.dimensions) for i, w in product(xed, c.scope.writes)):
processed.append(c)
continue
# The contracted iteration and data spaces
key = lambda d: d not in hope_invariant
ispace = c.ispace.project(key).reset()
dspace = c.dspace.project(key).reset()
# Some properties are dropped
properties = {d: v for d, v in c.properties.items() if key(d)}
properties = {d: v - {TILABLE} for d, v in properties.items()}
lifted.append(c.rebuild(ispace=ispace, dspace=dspace, properties=properties))
return lifted + processed
class Fusion(Queue):
"""
Fuse Clusters with compatible IterationSpace.
"""
def __init__(self, toposort):
super(Fusion, self).__init__()
self.toposort = toposort
def _make_key_hook(self, cgroup, level):
assert level > 0
assert len(cgroup.guards) == 1
return (tuple(cgroup.guards[0].get(i.dim) for i in cgroup.itintervals[:level-1]),)
def process(self, clusters):
cgroups = [ClusterGroup(c, c.itintervals) for c in clusters]
cgroups = self._process_fdta(cgroups, 1)
clusters = ClusterGroup.concatenate(*cgroups)
return clusters
def callback(self, cgroups, prefix):
# Toposort to maximize fusion
if self.toposort:
clusters = self._toposort(cgroups, prefix)
else:
clusters = ClusterGroup(cgroups)
# Fusion
processed = []
for k, g in groupby(clusters, key=self._key):
maybe_fusible = list(g)
if len(maybe_fusible) == 1:
processed.extend(maybe_fusible)
else:
try:
# Perform fusion
fused = Cluster.from_clusters(*maybe_fusible)
processed.append(fused)
except ValueError:
# We end up here if, for example, some Clusters have same
# iteration Dimensions but different (partial) orderings
processed.extend(maybe_fusible)
return [ClusterGroup(processed, prefix)]
def _key(self, c):
# Two Clusters/ClusterGroups are fusion candidates if their key is identical
key = (frozenset(c.itintervals), c.guards)
# We allow fusing Clusters/ClusterGroups with WaitLocks over different Locks,
        # while the WithLocks are to be kept separated (i.e., they remain separate tasks)
if isinstance(c, Cluster):
sync_locks = (c.sync_locks,)
else:
sync_locks = c.sync_locks
for i in sync_locks:
key += (frozendict({k: frozenset(type(i) if i.is_WaitLock else i for i in v)
for k, v in i.items()}),)
return key
def _toposort(self, cgroups, prefix):
# Are there any ClusterGroups that could potentially be fused? If
# not, do not waste time computing a new topological ordering
counter = Counter(self._key(cg) for cg in cgroups)
if not any(v > 1 for it, v in counter.most_common()):
return ClusterGroup(cgroups)
# Similarly, if all ClusterGroups have the same exact prefix and
# use the same form of synchronization (if any at all), no need to
# attempt a topological sorting
if len(counter.most_common()) == 1:
return ClusterGroup(cgroups)
dag = self._build_dag(cgroups, prefix)
def choose_element(queue, scheduled):
# Heuristic: let `k0` be the key of the last scheduled node; then out of
# the possible schedulable nodes we pick the one with key `k1` such that
# `max_i : k0[:i] == k1[:i]` (i.e., the one with "the most similar key")
if not scheduled:
return queue.pop()
key = self._key(scheduled[-1])
for i in reversed(range(len(key) + 1)):
candidates = [e for e in queue if self._key(e)[:i] == key[:i]]
try:
# Ensure stability
e = min(candidates, key=lambda i: cgroups.index(i))
except ValueError:
continue
queue.remove(e)
return e
assert False
return ClusterGroup(dag.topological_sort(choose_element))
def _build_dag(self, cgroups, prefix):
"""
A DAG representing the data dependences across the ClusterGroups within
a given scope.
"""
prefix = {i.dim for i in as_tuple(prefix)}
dag = DAG(nodes=cgroups)
for n, cg0 in enumerate(cgroups):
for cg1 in cgroups[n+1:]:
# A Scope to compute all cross-ClusterGroup anti-dependences
rule = lambda i: i.is_cross
scope = Scope(exprs=cg0.exprs + cg1.exprs, rules=rule)
# Optimization: we exploit the following property:
# no prefix => (edge <=> at least one (any) dependence)
# to jump out of this potentially expensive loop as quickly as possible
if not prefix and any(scope.d_all_gen()):
dag.add_edge(cg0, cg1)
# Anti-dependences along `prefix` break the execution flow
# (intuitively, "the loop nests are to be kept separated")
# * All ClusterGroups between `cg0` and `cg1` must precede `cg1`
# * All ClusterGroups after `cg1` cannot precede `cg1`
elif any(i.cause & prefix for i in scope.d_anti_gen()):
for cg2 in cgroups[n:cgroups.index(cg1)]:
dag.add_edge(cg2, cg1)
for cg2 in cgroups[cgroups.index(cg1)+1:]:
dag.add_edge(cg1, cg2)
break
# Any anti- and iaw-dependences impose that `cg1` follows `cg0`
# while not being its immediate successor (unless it already is),
# to avoid they are fused together (thus breaking the dependence)
# TODO: the "not being its immediate successor" part *seems* to be
# a work around to the fact that any two Clusters characterized
# by anti-dependence should have been given a different stamp,
# and same for guarded Clusters, but that is not the case (yet)
elif any(scope.d_anti_gen()) or\
any(i.is_iaw for i in scope.d_output_gen()):
dag.add_edge(cg0, cg1)
index = cgroups.index(cg1) - 1
if index > n and self._key(cg0) == self._key(cg1):
dag.add_edge(cg0, cgroups[index])
dag.add_edge(cgroups[index], cg1)
# Any flow-dependences along an inner Dimension (i.e., a Dimension
# that doesn't appear in `prefix`) impose that `cg1` follows `cg0`
elif any(not (i.cause and i.cause & prefix) for i in scope.d_flow_gen()):
dag.add_edge(cg0, cg1)
# Clearly, output dependences must be honored
elif any(scope.d_output_gen()):
dag.add_edge(cg0, cg1)
return dag
@timed_pass()
def fuse(clusters, toposort=False):
"""
Clusters fusion.
If ``toposort=True``, then the Clusters are reordered to maximize the likelihood
of fusion; the new ordering is computed such that all data dependencies are honored.
"""
return Fusion(toposort=toposort).process(clusters)
@cluster_pass(mode='all')
def optimize_pows(cluster, *args):
"""
Convert integer powers into Muls, such as ``a**2 => a*a``.
"""
return cluster.rebuild(exprs=[pow_to_mul(e) for e in cluster.exprs])
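# Informal check of the pow-to-mul rewrite that `optimize_pows` relies on
# (illustrative only; assumes `pow_to_mul` accepts a plain SymPy expression):
#
#   >>> from sympy import symbols
#   >>> a = symbols('a')
#   >>> pow_to_mul(a**2)
#   a*a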
@cluster_pass(mode='sparse')
def extract_increments(cluster, sregistry, *args):
"""
Extract the RHSs of non-local tensor expressions performing an associative
and commutative increment, and assign them to temporaries.
"""
processed = []
for e in cluster.exprs:
if e.is_Increment and e.lhs.function.is_Input:
handle = Symbol(name=sregistry.make_name(), dtype=e.dtype).indexify()
if e.rhs.is_Number or e.rhs.is_Symbol:
extracted = e.rhs
else:
extracted = e.rhs.func(*[i for i in e.rhs.args if i != e.lhs])
processed.extend([e.func(handle, extracted, is_Increment=False),
e.func(e.lhs, handle)])
else:
processed.append(e)
return cluster.rebuild(processed)
|
the-stack_106_17840
|
import requests
from typing import List, Dict
from data_refinery_common.models import (
Batch,
File,
SurveyJobKeyValue,
Organism
)
from data_refinery_foreman.surveyor import utils
from data_refinery_foreman.surveyor.external_source import ExternalSourceSurveyor
from data_refinery_common.job_lookup import ProcessorPipeline, Downloaders
from data_refinery_common.logging import get_and_configure_logger
logger = get_and_configure_logger(__name__)
EXPERIMENTS_URL = "https://www.ebi.ac.uk/arrayexpress/json/v3/experiments/"
SAMPLES_URL = EXPERIMENTS_URL + "{}/samples"
class ArrayExpressSurveyor(ExternalSourceSurveyor):
def source_type(self):
return Downloaders.ARRAY_EXPRESS.value
def determine_pipeline(self, batch: Batch, key_values: Dict = {}) -> ProcessorPipeline:
# If it's a CEL file run SCAN.UPC on it.
if batch.files[0].raw_format == "CEL":
return ProcessorPipeline.AFFY_TO_PCL
# If only processed data is available then we don't need to do
# anything to it
elif batch.files[0].raw_format == batch.files[0].processed_format:
return ProcessorPipeline.NO_OP
# If it's not CEL and it's not already processed then we just
# want to download it for Jackie's grant.
else:
return ProcessorPipeline.NONE
def group_batches(self) -> List[List[Batch]]:
return utils.group_batches_by_first_file(self.batches)
def get_experiment_metadata(self, experiment_accession_code: str) -> Dict:
experiment_request = requests.get(EXPERIMENTS_URL + experiment_accession_code)
parsed_json = experiment_request.json()["experiments"]["experiment"][0]
experiment = {}
experiment["name"] = parsed_json["name"]
experiment["experiment_accession_code"] = experiment_accession_code
# If there is more than one arraydesign listed in the experiment
# then there is no other way to determine which array was used
# for which sample other than looking at the header of the CEL
# file. That obviously cannot happen until the CEL file has been
# downloaded so we can just mark it as UNKNOWN and let the
# downloader inspect the downloaded file to determine the
# array then.
if len(parsed_json["arraydesign"]) == 0:
logger.warn("Experiment %s has no arraydesign listed.",
experiment_accession_code,
survey_job=self.survey_job.id)
experiment["platform_accession_code"] = "UNKNOWN"
elif len(parsed_json["arraydesign"]) > 1:
experiment["platform_accession_code"] = "UNKNOWN"
else:
experiment["platform_accession_code"] = \
parsed_json["arraydesign"][0]["accession"]
experiment["release_date"] = parsed_json["releasedate"]
if "lastupdatedate" in parsed_json:
experiment["last_update_date"] = parsed_json["lastupdatedate"]
else:
experiment["last_update_date"] = parsed_json["releasedate"]
return experiment
def _generate_batches(self,
samples: List[Dict],
experiment: Dict,
replicate_raw: bool = True) -> List[Batch]:
"""Generates a Batch for each sample in samples.
Uses the metadata contained in experiment (which should be
generated via get_experiment_metadata) to add additional
metadata to each Batch. If replicate_raw is True (the default)
then only raw files will be replicated. Otherwise all files
will be replicated.
"""
for sample in samples:
if "file" not in sample:
continue
organism_name = "UNKNOWN"
for characteristic in sample["characteristic"]:
if characteristic["category"].upper() == "ORGANISM":
organism_name = characteristic["value"].upper()
if organism_name == "UNKNOWN":
logger.error("Sample from experiment %s did not specify the organism name.",
experiment["experiment_accession_code"],
survey_job=self.survey_job.id)
organism_id = 0
else:
organism_id = Organism.get_id_for_name(organism_name)
for sample_file in sample["file"]:
# Generally we only want to replicate the raw data if
# we can, however if there isn't raw data then we'll
# take the processed stuff.
if (replicate_raw and sample_file["type"] != "data") \
or sample_file["name"] is None:
continue
# sample_file["comment"] is only a list if there's
# more than one comment...
comments = sample_file["comment"]
if isinstance(comments, list):
# Could be: "Derived ArrayExpress Data Matrix FTP
# file" or: "ArrayExpress FTP file". If there is
# no comment with a name including "FTP file" then
# we don't know where to download it so we need to
# mark this job as an error. Therefore don't catch
# the potential exception where download_url
# doesn't get defined.
for comment in comments:
if comment["name"].find("FTP file") != -1:
download_url = comment["value"]
else:
download_url = comments["value"]
raw_format = sample_file["name"].split(".")[-1]
processed_format = "PCL" if replicate_raw else raw_format
file = File(name=sample_file["name"],
download_url=download_url,
raw_format=raw_format,
processed_format=processed_format,
size_in_bytes=-1) # Will have to be determined later
self.add_batch(platform_accession_code=experiment["platform_accession_code"],
experiment_accession_code=experiment["experiment_accession_code"],
organism_id=organism_id,
organism_name=organism_name,
experiment_title=experiment["name"],
release_date=experiment["release_date"],
last_uploaded_date=experiment["last_update_date"],
files=[file])
def discover_batches(self):
experiment_accession_code = (
SurveyJobKeyValue
.objects
.get(survey_job_id=self.survey_job.id,
key__exact="experiment_accession_code")
.value
)
logger.info("Surveying experiment with accession code: %s.",
experiment_accession_code,
survey_job=self.survey_job.id)
experiment = self.get_experiment_metadata(experiment_accession_code)
r = requests.get(SAMPLES_URL.format(experiment_accession_code))
samples = r.json()["experiment"]["sample"]
self._generate_batches(samples, experiment)
if len(samples) != 0 and len(self.batches) == 0:
# Found no samples with raw data, so replicate the
# processed data instead
self._generate_batches(samples, experiment, replicate_raw=False)
|
the-stack_106_17842
|
# encoding: utf-8
from sqlalchemy.orm import relation
from sqlalchemy import types, Column, Table, ForeignKey, UniqueConstraint
from ckan.model import (
core,
meta,
types as _types,
domain_object,
vocabulary,
)
import ckan # this import is needed
import ckan.model
import ckan.lib.dictization
import ckan.lib.maintain as maintain
__all__ = ['tag_table', 'package_tag_table', 'Tag', 'PackageTag',
'MAX_TAG_LENGTH', 'MIN_TAG_LENGTH']
MAX_TAG_LENGTH = 100
MIN_TAG_LENGTH = 2
tag_table = Table('tag', meta.metadata,
Column('id', types.UnicodeText, primary_key=True, default=_types.make_uuid),
Column('name', types.Unicode(MAX_TAG_LENGTH), nullable=False),
Column('vocabulary_id',
types.Unicode(vocabulary.VOCABULARY_NAME_MAX_LENGTH),
ForeignKey('vocabulary.id')),
UniqueConstraint('name', 'vocabulary_id')
)
package_tag_table = Table('package_tag', meta.metadata,
Column('id', types.UnicodeText, primary_key=True, default=_types.make_uuid),
Column('package_id', types.UnicodeText, ForeignKey('package.id')),
Column('tag_id', types.UnicodeText, ForeignKey('tag.id')),
Column('state', types.UnicodeText, default=core.State.ACTIVE),
)
class Tag(domain_object.DomainObject):
def __init__(self, name='', vocabulary_id=None):
self.name = name
self.vocabulary_id = vocabulary_id
# not stateful so same as purge
def delete(self):
self.purge()
@classmethod
def by_id(cls, tag_id, autoflush=True):
'''Return the tag with the given id, or None.
:param tag_id: the id of the tag to return
:type tag_id: string
:returns: the tag with the given id, or None if there is no tag with
that id
:rtype: ckan.model.tag.Tag
'''
query = meta.Session.query(Tag).filter(Tag.id==tag_id)
query = query.autoflush(autoflush)
tag = query.first()
return tag
@classmethod
def by_name(cls, name, vocab=None, autoflush=True):
'''Return the tag with the given name, or None.
By default only free tags (tags which do not belong to any vocabulary)
are returned.
If the optional argument ``vocab`` is given then only tags from that
vocabulary are returned, or ``None`` if there is no tag with that name
in that vocabulary.
:param name: the name of the tag to return
:type name: string
:param vocab: the vocabulary to look in (optional, default: None)
:type vocab: ckan.model.vocabulary.Vocabulary
        :returns: the tag with the given name, or None if there is no tag with
            that name (in the given vocabulary, if one was specified)
:rtype: ckan.model.tag.Tag
'''
if vocab:
query = meta.Session.query(Tag).filter(Tag.name==name).filter(
Tag.vocabulary_id==vocab.id)
else:
query = meta.Session.query(Tag).filter(Tag.name==name).filter(
Tag.vocabulary_id==None)
query = query.autoflush(autoflush)
tag = query.first()
return tag
@classmethod
def get(cls, tag_id_or_name, vocab_id_or_name=None):
'''Return the tag with the given id or name, or None.
By default only free tags (tags which do not belong to any vocabulary)
are returned.
If the optional argument ``vocab_id_or_name`` is given then only tags
that belong to that vocabulary will be returned, and ``None`` will be
returned if there is no vocabulary with that vocabulary id or name or
if there is no tag with that tag id or name in that vocabulary.
:param tag_id_or_name: the id or name of the tag to return
:type tag_id_or_name: string
:param vocab_id_or_name: the id or name of the vocabulary to look for
the tag in
:type vocab_id_or_name: string
:returns: the tag object with the given id or name, or None if there is
no tag with that id or name
:rtype: ckan.model.tag.Tag
'''
vocab = None
if vocab_id_or_name:
vocab = vocabulary.Vocabulary.get(vocab_id_or_name)
if vocab is None:
# The user specified an invalid vocab.
return None
tag = Tag.by_id(tag_id_or_name)
if not tag:
return Tag.by_name(tag_id_or_name, vocab=vocab)
elif vocab and tag.vocabulary_id != vocab.id:
return None
return tag
@classmethod
@maintain.deprecated(since="2.9.0")
def search_by_name(cls, search_term, vocab_id_or_name=None):
'''DEPRECATED
Return all tags whose names contain a given string.
By default only free tags (tags which do not belong to any vocabulary)
are returned. If the optional argument ``vocab_id_or_name`` is given
then only tags from that vocabulary are returned.
:param search_term: the string to search for in the tag names
:type search_term: string
:param vocab_id_or_name: the id or name of the vocabulary to look in
(optional, default: None)
:type vocab_id_or_name: string
:returns: a list of tags that match the search term
:rtype: list of ckan.model.tag.Tag objects
'''
if vocab_id_or_name:
vocab = vocabulary.Vocabulary.get(vocab_id_or_name)
if vocab is None:
# The user specified an invalid vocab.
return None
query = meta.Session.query(Tag).filter(Tag.vocabulary_id==vocab.id)
else:
query = meta.Session.query(Tag)
search_term = search_term.strip().lower()
query = query.filter(Tag.name.contains(search_term))
query = query.distinct().join(Tag.package_tags)
return query
@classmethod
def all(cls, vocab_id_or_name=None):
'''Return all tags that are currently applied to any dataset.
By default only free tags (tags which do not belong to any vocabulary)
are returned. If the optional argument ``vocab_id_or_name`` is given
then only tags from that vocabulary are returned.
:param vocab_id_or_name: the id or name of the vocabulary to look in
(optional, default: None)
:type vocab_id_or_name: string
:returns: a list of all tags that are currently applied to any dataset
:rtype: list of ckan.model.tag.Tag objects
'''
if vocab_id_or_name:
vocab = vocabulary.Vocabulary.get(vocab_id_or_name)
if vocab is None:
# The user specified an invalid vocab.
raise ckan.logic.NotFound("could not find vocabulary '%s'"
% vocab_id_or_name)
query = meta.Session.query(Tag).filter(Tag.vocabulary_id==vocab.id)
else:
query = meta.Session.query(Tag).filter(Tag.vocabulary_id == None)
query = query.distinct().join(PackageTag)
query = query.filter_by(state='active')
return query
@property
def packages(self):
'''Return a list of all packages that have this tag, sorted by name.
:rtype: list of ckan.model.package.Package objects
'''
q = meta.Session.query(ckan.model.package.Package)
q = q.join(PackageTag)
q = q.filter_by(tag_id=self.id)
q = q.filter_by(state='active')
q = q.order_by(ckan.model.package.Package.name)
packages = q.all()
return packages
def __repr__(self):
return '<Tag %s>' % self.name
class PackageTag(core.StatefulObjectMixin,
domain_object.DomainObject):
def __init__(self, package=None, tag=None, state=None, **kwargs):
self.package = package
self.tag = tag
self.state = state
for k,v in kwargs.items():
setattr(self, k, v)
def __repr__(self):
s = u'<PackageTag package=%s tag=%s>' % (self.package.name, self.tag.name)
        return s  # __repr__ must return str, not bytes, under Python 3
@classmethod
@maintain.deprecated(since="2.9.0")
def by_name(cls, package_name, tag_name, vocab_id_or_name=None,
autoflush=True):
'''DEPRECATED (and broken - missing the join to Tag)
Return the PackageTag for the given package and tag names, or None.
By default only PackageTags for free tags (tags which do not belong to
any vocabulary) are returned. If the optional argument
``vocab_id_or_name`` is given then only PackageTags for tags from that
vocabulary are returned.
:param package_name: the name of the package to look for
:type package_name: string
:param tag_name: the name of the tag to look for
:type tag_name: string
:param vocab_id_or_name: the id or name of the vocabulary to look for
the tag in
:type vocab_id_or_name: string
:returns: the PackageTag for the given package and tag names, or None
if there is no PackageTag for those package and tag names
:rtype: ckan.model.tag.PackageTag
'''
if vocab_id_or_name:
vocab = vocabulary.Vocabulary.get(vocab_id_or_name)
if vocab is None:
# The user specified an invalid vocab.
return None
query = (meta.Session.query(PackageTag, Tag, ckan.model.Package)
.filter(Tag.vocabulary_id == vocab.id)
.filter(ckan.model.Package.name==package_name)
.filter(Tag.name==tag_name))
else:
query = (meta.Session.query(PackageTag)
.filter(ckan.model.Package.name==package_name)
.filter(Tag.name==tag_name))
query = query.autoflush(autoflush)
return query.one()[0]
def related_packages(self):
return [self.package]
meta.mapper(Tag, tag_table, properties={
'package_tags': relation(PackageTag, backref='tag',
cascade='all, delete, delete-orphan',
),
'vocabulary': relation(vocabulary.Vocabulary,
order_by=tag_table.c.name)
}
)
# NB meta.mapper(tag.PackageTag... is found in package.py, because if it was
# here we'd get circular references
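# Hypothetical usage sketch (illustrative only; assumes an initialised CKAN model
# and an existing free tag named 'economy'):
#
#   tag = Tag.get('economy')
#   if tag:
#       print([pkg.name for pkg in tag.packages])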
|
the-stack_106_17845
|
# Copyright [yyyy] [name of copyright owner]
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" DropBlock, DropPath
PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
- https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
- https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def drop_block_2d(
x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
runs with success, but needs further validation and possibly optimization for lower runtime impact.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
# seed_drop_rate, the gamma parameter
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(W - block_size + 1) * (H - block_size + 1))
# Forces the block to be inside the feature map.
w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))
valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
if batchwise:
# one mask for whole batch, quite a bit faster
uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
else:
uniform_noise = torch.rand_like(x)
block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
block_mask = -F.max_pool2d(
-block_mask,
kernel_size=clipped_block_size, # block_size,
stride=1,
padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
if inplace:
x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
else:
x = x * block_mask + normal_noise * (1 - block_mask)
else:
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
def drop_block_fast_2d(
x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
    DropBlock with an experimental gaussian noise option. Simplified from above without concern for valid
block mask at edges.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(W - block_size + 1) * (H - block_size + 1))
if batchwise:
# one mask for whole batch, quite a bit faster
block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma
else:
# mask per batch element
block_mask = torch.rand_like(x) < gamma
block_mask = F.max_pool2d(
block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
if inplace:
x.mul_(1. - block_mask).add_(normal_noise * block_mask)
else:
x = x * (1. - block_mask) + normal_noise * block_mask
else:
block_mask = 1 - block_mask
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
class DropBlock2d(nn.Module):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
"""
def __init__(self,
drop_prob=0.1,
block_size=7,
gamma_scale=1.0,
with_noise=False,
inplace=False,
batchwise=False,
fast=True):
super(DropBlock2d, self).__init__()
self.drop_prob = drop_prob
self.gamma_scale = gamma_scale
self.block_size = block_size
self.with_noise = with_noise
self.inplace = inplace
self.batchwise = batchwise
self.fast = fast # FIXME finish comparisons of fast vs not
def forward(self, x):
if not self.training or not self.drop_prob:
return x
if self.fast:
return drop_block_fast_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
else:
return drop_block_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
def drop_path(x, drop_prob: float = 0., training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
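# --- Illustrative usage sketch (not part of the original module) --------------------
# DropPath is normally applied to the residual branch of a block. The toy module below
# only demonstrates the wiring; its name and sizes are arbitrary.
class _ToyResidualBlock(nn.Module):
    """Example only: residual block whose branch is dropped stochastically."""
    def __init__(self, dim, drop_prob=0.1):
        super(_ToyResidualBlock, self).__init__()
        self.body = nn.Sequential(nn.Linear(dim, dim), nn.ReLU(), nn.Linear(dim, dim))
        self.drop_path = DropPath(drop_prob)
    def forward(self, x):
        # identity path plus stochastically dropped residual path
        return x + self.drop_path(self.body(x))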
|
the-stack_106_17847
|
import maya.cmds as mc
import math
# find the angle between the line through two points and the vertical; the first point should be the center
def angleIB(x0, y0, x1, y1):
from math import degrees, atan2
a = (degrees( atan2(y1-y0, x1-x0) ) + 90) / 180
return a
#find the 2D distance
def distance(x0, y0, x1, y1):
from math import sqrt, pow
return sqrt(pow(x1-x0, 2) + pow(y1-y0, 2))
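# quick sanity check (illustrative): distance(0, 0, 3, 4) == 5.0, and
# angleIB(0, 0, 0, 1) == 1.0, since a point directly above the center sits
# 180 degrees around from the downward vertical ((90 + 90) / 180)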
#find position of input object or component
def positionOf(obj):
return mc.xform(obj, q=True, ws=True, t=True)
#position input object or component to x,y,z
def place(obj, x, y, z) :
mc.xform(obj, ws=True, t=(x, y, z))
# invertBend body function
def invertBend ( ) :
# store object selection
selected = mc.ls (sl=True)
# create manipulator
manCircle = mc.circle (ch=True,o=True,sw=-180, name="manCircleIB")
mc.move (0,1,0,r=False)
locBase = mc.spaceLocator (p=(0,0,0), n="manBaseIB")
locCenter = mc.spaceLocator (p=(0,0,0), n="manCenterIB",r=True)
mc.move (0,1,0,r=False)
distanceNode = mc.createNode ("distanceBetween", n="raduisIB")
mc.connectAttr (locBase[0]+".translate", distanceNode+".point1")
mc.connectAttr (locCenter[0]+".translate", distanceNode+".point2")
mc.connectAttr (distanceNode+".distance", manCircle[1]+".radius")
mc.connectAttr (distanceNode+".distance", manCircle[0]+".ty")
mc.setAttr (manCircle[0]+".sp", 0, -1, 0)
mc.setAttr (manCircle[0]+".rp", 0, -1, 0)
mc.connectAttr (locBase[0]+".translateX", locCenter[0]+".translateX", f=True)
mc.connectAttr (locBase[0]+".translateZ", locCenter[0]+".translateZ", f=True)
mc.parent (manCircle[0], locBase[0])
mc.select (clear = True)
# store all vtx in one array
vtxCount = mc.polyEvaluate (selected[0], v=True)
mc.select (selected[0]+".vtx [0:%d]" % (vtxCount - 1))
vtxArray = mc.ls (sl=True, fl=True)
mc.select (clear=True)
#confirm button
result = mc.confirmDialog(
title="invertBend",
message="ready to invertBend?",
button=['OK', 'Cancel'],
defaultButton='OK',
cancelButton='Cancel',
dismissString='Cancel')
if result == "OK" :
for vtx in vtxArray :
vtxPos = positionOf(vtx)
cenPos = positionOf(locCenter)
basePos = positionOf(locBase)
rad = cenPos[1]-basePos[1]
vtxR = distance(cenPos[0],cenPos[1],vtxPos[0],vtxPos[1])
vtxA = angleIB(cenPos[0],cenPos[1],vtxPos[0],vtxPos[1])
flatX = rad * math.pi * vtxA
flatY = basePos[1]+ ( cenPos[1]-vtxR )
place (vtx,flatX,flatY,vtxPos[2])
#call invertBend
invertBend()
|
the-stack_106_17848
|
# SIGNALS SENT FROM THE CHAT
# > CHAT COMMAND THAT TRIGGERS PATH CREATION
def on_chat_crea_camino():
agent.teleport(world(44, 4, 7), SOUTH)
crea_camino()
player.on_chat("crea_camino", on_chat_crea_camino)
# > CHAT COMMAND THAT TRIGGERS WALKING THE PATH
def on_chat_recorre_camino():
agent.teleport(world(44, 4, 7), SOUTH)
recorre_camino()
player.on_chat("recorre_camino", on_chat_recorre_camino)
# FUNCTION TO PLACE GOLD BLOCKS
def poner_oro(num: number):
for index5 in range(num):
agent.destroy(DOWN)
agent.set_item(GOLD_BLOCK, 1, 1)
agent.place(DOWN)
agent.move(FORWARD, 1)
agent.collect_all()
agent.teleport(world(14, 4, 7), SOUTH)
# FUNCTION TO CREATE THE PATH
def crea_camino():
poner_oro(10)
agent.turn(LEFT_TURN)
poner_oro(3)
agent.turn(RIGHT_TURN)
poner_oro(10)
agent.teleport(world(44, 4, 17), SOUTH)
agent.turn(RIGHT_TURN)
poner_oro(3)
agent.turn(LEFT_TURN)
poner_oro(10)
# FUNCTION TO WALK THE CORRECT PATH
# FUNCTION THAT IMPLEMENTS THE IF/ELIF/ELSE LOGIC
def recorre_camino():
agent.move(FORWARD, 10)
if agent.inspect(AgentInspection.BLOCK, DOWN) == IRON_BLOCK:
agent.turn_left()
agent.move(FORWARD, 3)
agent.turn_right()
agent.move(FORWARD, 9)
agent.move(UP, 5)
elif agent.inspect(AgentInspection.BLOCK, DOWN) == REDSTONE_BLOCK:
agent.turn_right()
agent.move(FORWARD, 3)
agent.turn_left()
agent.move(FORWARD, 9)
agent.move(UP, 5)
else:
agent.teleport_to_player()
|
the-stack_106_17849
|
import csv
from datetime import datetime
zeek_dict = {
'dhcp.log': ['ts', 'uid', 'id_orig_h', 'id_orig_p', 'id_resp_h', 'id_resp_p', 'mac', 'assigned_ip', 'lease_time',
'trans_id'],
'dns.log': ['ts', 'uid', 'id_orig_h', 'id_orig_p', 'id_resp_h', 'id_resp_p', 'proto', 'port', 'query', 'qclass',
'qclass_name', 'qtype', 'qtype_name', 'rcode', 'rcode_name', 'QR', 'AA', 'TC', 'RD', 'Z', 'answers',
'TTLs', 'rejected'],
'ftp.log': ['ts', 'uid', 'id_orig_h', 'id_orig_p', 'id_resp_h', 'id_resp_p', 'user', 'password', 'command',
'arg', 'mime_type', 'file_size', 'reply_code', 'reply_msg', 'passive', 'orig_h', 'resp_h', 'resp_p',
'fuid'],
'ssh.log': ['ts', 'uid', 'id_orig_h', 'id_orig_p', 'id_resp_h', 'id_resp_p', 'status', 'direction', 'client',
'server', 'resp_size'],
'files.log': ['ts', 'fuid', 'tx_hosts', 'rx_hosts', 'conn_uids', 'source', 'depth', 'analyzers', 'mime_type',
'filename', 'duration', 'local_orig', 'is_orig', 'seen_bytes', 'total_bytes', 'missing_bytes',
'overflow_bytes', 'timedout', 'parent_fuid', 'md5_sha1_sha256', 'extracted'],
'http.log': ['ts', 'uid', 'id_orig_h', 'id_orig_p', 'id_resp_h', 'id_resp_p', 'trans_depth', 'method', 'host',
'uri', 'referrer', 'user_agent', 'request_ body_len', 'response_ body_len', 'status_code',
'status_msg', 'info_code', 'info_msg', 'filename', 'tags', 'username', 'password', 'proxied',
'orig_fuids', 'orig_mime_types', 'resp_fuids', 'resp_mime_types'],
'notice.log': ['ts', 'uid', 'id_orig_h', 'id_orig_p', 'id_resp_h', 'id_resp_p', 'fuid', 'file_mime_type',
'file_desc', 'proto', 'note', 'msg', 'sub', 'src', 'dst', 'p', 'n', 'peer_descr', 'actions',
'suppress_for', 'dropped'],
'smtp.log': ['ts', 'uid', 'id_orig_h', 'id_orig_p', 'id_resp_h', 'id_resp_p', 'proto', 'trans_depth', 'helo',
'mailfrom', 'rcptto', 'date', 'from', 'to', 'in_reply_to', 'subject', 'x_originating_ip',
'first_received', 'second_received', 'last_reply', 'path', 'user_agent', 'tls', 'fuids',
'is_webmail'],
'ssl.log': ['ts', 'uid', 'id_orig_h', 'id_orig_p', 'id_resp_h', 'id_resp_p', 'version', 'cipher', 'server_name',
'session_id', 'subject', 'issuer_subject', 'not_valid_before', 'not_valid_after', 'last_alert',
'client_subject', 'clnt_issuer_subject', 'cer_hash', 'validation_status'],
'tunnel.log': ['ts', 'uid', 'id_orig_h', 'id_orig_p', 'id_resp_h', 'id_resp_p', 'tunnel_type', 'action'],
'weird.log': ['ts', 'uid', 'id_orig_h', 'id_orig_p', 'id_resp_h', 'id_resp_p', 'name', 'addl', 'notice', 'peer'],
'conn.log': ['ts', 'uid', 'id_orig_h', 'id_orig_p', 'id_resp_h', 'id_resp_p', 'proto', 'service', 'duration',
'orig_bytes', 'resp_bytes', 'conn_state', 'local_orig', 'local_resp', 'missed_bytes', 'history',
'orig_pkts', 'orig_ip_bytes', 'resp_pkts', 'resp_ip_bytes', 'tunnel_parents']}
def zeek_to_csv(file_in_path, file_out_path, log_type):
# TODO make this have error aware input
    # Convert a Zeek log file into a csv file so we can manipulate it with pandas
out = file_out_path + '/' + log_type + '.csv'
print('SAVING TO: ' + out)
with open(out, 'w+', encoding='utf-8') as csv_file:
w = csv.writer(csv_file, dialect='excel')
with open(file_in_path, encoding='utf8') as file:
lines = file.read().split('\n')
# print('Adding Header for type: ' + log_type + '.log')
files = [zeek_dict[log_type + '.log']]
cnt = 0
total = 0
for line in lines:
files.append(line.split('\t'))
cnt += 1
total += 1
if cnt >= 10000:
w.writerows(files)
cnt = 0
                    files = []  # don't write the header row again after the first flush
if files:
w.writerows(files)
print(total)
if __name__ == '__main__':
start = datetime.now()
in_path = '../data/zeek/full'
out_path = '../data/test'
print('Starting conversion...')
zeek_to_csv(file_in_path=f'{in_path}/conn.log',
file_out_path=f'{out_path}',
log_type='conn')
print('done')
zeek_to_csv(file_in_path=f'{in_path}/dns.log',
file_out_path=f'{out_path}',
log_type='dns')
print('done')
zeek_to_csv(file_in_path=f'{in_path}/ftp.log',
file_out_path=f'{out_path}',
log_type='ftp')
print('done')
zeek_to_csv(file_in_path=f'{in_path}/files.log',
file_out_path=f'{out_path}',
log_type='files')
print('done')
zeek_to_csv(file_in_path=f'{in_path}/smtp.log',
file_out_path=f'{out_path}',
log_type='smtp')
print('done')
zeek_to_csv(file_in_path=f'{in_path}/ssh.log',
file_out_path=f'{out_path}',
log_type='ssh')
print('done')
zeek_to_csv(file_in_path=f'{in_path}/weird.log',
file_out_path=f'{out_path}',
log_type='weird')
print('done')
zeek_to_csv(file_in_path=f'{in_path}/http.log',
file_out_path=f'{out_path}',
log_type='http')
print('time taken: ', end='')
print(datetime.now() - start)
|
the-stack_106_17850
|
"""
***************************************************************************************
MP3 audio player based on Rpi Pico
audio data is stored in onboard flash filesystem Up to 800 KB of sound capacity
plays one clip of sound (random) on every power on
and uses external circuit for very low power
***************************************************************************************
"""
import board
import digitalio
import analogio
import audiomp3
import audiopwmio
import os
import pwmio
import microcontroller
import random
"""
*****************************************************
TEPT5700 sensor with 10K resistor aprox ADC readings
*****************************************************
LUX(MS6610) RAW VOLT
48 0x0a8 0.135352
33 0x070 0.090234
21 0x054 0.067676
14 0x03b 0.047534
7 0x033 0.041089
6 0x029 0.033032
5 0x031 0.039478
4 0x026 0.030615
"""
light_sensor_threshold = 0.030615
analog_gpio = board.GP26_A0
poweroff_gpio = board.GP14
audio_gpio = board.GP28_A2
ext_led_gpio = board.GP10
default_next_hour = 1
night_time = 0
NUMBER_OF_FILES=12
# Do this ASAP! to keep board powered on
poweroff = digitalio.DigitalInOut(poweroff_gpio)
poweroff.direction = digitalio.Direction.OUTPUT
poweroff.value = False
led = digitalio.DigitalInOut(board.LED)
led.direction = digitalio.Direction.OUTPUT
led.value = True
light_sensor = analogio.AnalogIn(analog_gpio)
audio = audiopwmio.PWMAudioOut(audio_gpio)
#Generating random number to know which file to play
internal_adc_value=microcontroller.cpu.temperature
float_adc_value = 10000 * internal_adc_value
int_adc_value = int(float_adc_value)
random.seed(int_adc_value)
random_number=random.randint(1,NUMBER_OF_FILES)
print("float adc",float_adc_value)
print("int adc",int_adc_value)
print("random number",random_number)
next_hour = random_number
# Check light sensor to know if it's night time
raw_adc = light_sensor.value
light_voltage = ( raw_adc / 65535 ) * light_sensor.reference_voltage
print("raw adc",raw_adc)
print("light voltage",light_voltage)
if light_voltage < light_sensor_threshold :
night_time = 1
print("it's nightime!")
else :
night_time = 0
print("it's daytime!")
# Select the clip that matches the random number; clips are named 1.mp3 .. NUMBER_OF_FILES.mp3
print("playing sound", next_hour)
decoder = audiomp3.MP3Decoder(open(str(next_hour) + ".mp3", "rb"))
#Only play sounds on DayLight
blink_led=pwmio.PWMOut(ext_led_gpio,duty_cycle=50,frequency=1,variable_frequency=False)
if night_time == 0 :
audio.play(decoder)
while audio.playing:
pass
print("Done playing!")
blink_led.duty_cycle=0
led.value = False
poweroff.value = True
poweroff.direction = digitalio.Direction.INPUT
#Just stop here
while True:
pass
|
the-stack_106_17851
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from fnmatch import fnmatch
from ipaddress import ip_address
from urllib.parse import urlparse
from string import ascii_letters
from . import arg, PatternExtractor
from .. import RefineryCriticalException
from ...lib.patterns import indicators
class xtp(PatternExtractor):
"""
Extract Patterns: Uses regular expressions to extract indicators from the
input data and optionally filters these results heuristically. The unit is
designed to extract indicators such as domain names and IP addresses, see
below for a complete list. To extract data formats such as hex-encoded
data, use `refinery.carve`.
"""
def __init__(
self,
*pattern: arg('pattern', type=str, default=('hostname', 'url', 'email'), help=(
'Choose the pattern to extract, defaults are hostname, url, and email. '
            'Use an asterisk character to select all available patterns. The available '
'patterns are: {}'.format(', '.join(p.name for p in indicators)))),
filter: arg('-f', dest='filter', action='count', help=(
'If this setting is enabled, the xtp unit will attempt to reduce the number '
'of false positives by certain crude heuristics. Specify multiple times to '
'make the filtering more aggressive.')) = 0,
min=1, max=None, len=None, stripspace=False, duplicates=False, longest=False, take=None
):
self.superinit(super(), **vars(), ascii=True, utf16=True)
patterns = {
p for name in pattern for p in indicators if fnmatch(p.name, name)
}
if indicators.hostname in patterns:
patterns.remove(indicators.hostname)
patterns.add(indicators.ipv4)
patterns.add(indicators.domain)
patterns = [F'(?P<{p.name}>{p.value})' for p in patterns]
if not patterns:
raise RefineryCriticalException('The given mask does not match any known indicator pattern.')
pattern = '|'.join(patterns)
self.log_debug(F'using pattern: {pattern}')
self.args.pattern = re.compile(pattern.encode(self.codec))
self.args.filter = filter
_ALPHABETIC = ascii_letters.encode('ASCII')
_LEGITIMATE_HOSTS = {
'acm.org' : 1,
'adobe.com' : 1,
'aka.ms' : 1,
'apache.org' : 1,
'apple.com' : 1,
'archive.org' : 2,
'azure.com' : 1,
'baidu.com' : 2,
'comodo.net' : 1,
'comodoca.com' : 1,
'curl.haxx.se' : 1,
'digicert.com' : 1,
'github.com' : 3,
'globalsign.com' : 1,
'globalsign.net' : 1,
'google.com' : 3,
'gov' : 2,
'iana.org' : 1,
'live.com' : 1,
'microsoft.com' : 1,
'msdn.com' : 1,
'msn.com' : 1,
'office.com' : 1,
'openssl.org' : 1,
'openxmlformats.org' : 1,
'purl.org' : 1,
'python.org' : 1,
'skype.com' : 1,
'sourceforge.net' : 3,
'sway-cdn.com' : 1,
'sway-extensions.com' : 1,
'symantec.com' : 1,
'symauth.com' : 1,
'symcb.com' : 1,
'symcd.com' : 1,
'thawte.com' : 1,
'verisign.com' : 1,
'w3.org' : 1,
'wikipedia.org' : 1,
'wolfram.com' : 1,
'xml.org' : 1,
'xmlsoap.org' : 1,
'yahoo.com' : 1,
}
_DOMAIN_WHITELIST = [
'system.net',
'wscript.shell',
]
def _check_match(self, data, pos, name, value):
if name == 'ipv4':
ocets = [int(x) for x in value.split(B'.')]
if ocets.count(0) >= 3:
return None
for area in (
data[pos - 20 : pos + 20],
data[pos * 2 - 40 : pos * 2 + 40 : 2],
data[pos * 2 - 41 : pos * 2 + 39 : 2]
):
if B'version' in area.lower():
return None
ip = ip_address(value.decode(self.codec))
if not ip.is_global:
if self.args.filter > 3 or not ip.is_private:
return None
elif name in ('url', 'socket', 'domain', 'subdomain'):
ioc = value.decode(self.codec)
if '://' not in ioc: ioc = F'TCP://{ioc}'
host = urlparse(ioc).netloc.split(':', 1)[0]
hlow = host.lower()
for white, level in self._LEGITIMATE_HOSTS.items():
if level <= self.args.filter and hlow == white or hlow.endswith(F'.{white}'):
return None
if any(hlow == w for w in self._DOMAIN_WHITELIST):
return None
if name.endswith('domain'):
hostparts = host.split('.')
# These heuristics attempt to filter out member access to variables in
# scripts which can be mistaken for domains because of the TLD inflation
# we've had.
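                # For example, JavaScript member accesses such as `this.close` or
                # `window.name` can look like domains (hypothetical inputs); the `this.`
                # form is rejected outright below, and the rest are weeded out via case
                # ratio, label length and repeated-prefix counts.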
uppercase = sum(1 for c in host if c.isalpha() and c.upper() == c)
lowercase = sum(1 for c in host if c.isalpha() and c.lower() == c)
if lowercase and uppercase:
caseratio = uppercase / lowercase
if 0.1 < caseratio < 0.9:
return None
if all(x.isidentifier() for x in hostparts):
if len(hostparts) == 2 and hostparts[0] == 'this':
return None
if len(hostparts[-2]) < 3:
return None
if any(x.startswith('_') for x in hostparts):
return None
if len(hostparts[-1]) > 3:
seen_before = len(set(re.findall(
R'{}(?:\.\w+)+'.format(hostparts[0]).encode('ascii'), data)))
if seen_before > 2:
return None
elif name == 'email':
at = value.find(B'@')
ix = 0
while value[ix] not in self._ALPHABETIC:
ix += 1
return None if at - ix < 3 else value[ix:]
elif name == 'path':
if len(value) < 8:
return None
if len(value) > 16 and len(re.findall(RB'\\x\d\d', value)) > len(value) // 10:
return None
return value
def process(self, data):
whitelist = set()
def check(match):
for name, value in match.groupdict().items():
if value is not None:
break
else:
raise RefineryCriticalException('Received empty match.')
if value in whitelist:
return None
result = self._check_match(data, match.start(), name, value)
if result is not None:
return result
whitelist.add(value)
self.log_debug(self.args.pattern)
transforms = [] if not self.args.filter else [check]
yield from self.matches_filtered(memoryview(data), self.args.pattern, *transforms)
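# Usage sketch (hypothetical invocation; the exact command-line form depends on how the
# surrounding refinery framework exposes its units):
#   emit sample.bin | xtp url domain -f
# which would extract URL and domain indicators from sample.bin and apply one level of
# the heuristic filtering implemented in _check_match above.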
|
the-stack_106_17853
|
escolha = 0
while escolha != 5:
    num1 = int(input("\nEnter a number: "))
    num2 = int(input("\nEnter another number: "))
    result = 0
    escolha = int(input("""Choose one of the options and type it:
    [1] - add
    [2] - subtract
    [3] - multiply
    [4] - divide
    Enter your choice: """))
    if escolha == 1:
        result = num1 + num2
    elif escolha == 2:
        result = num1 - num2
    elif escolha == 3:
        result = num1 * num2
    elif escolha == 4:
        result = num1 / num2
    print("The result of the calculation is: {}".format(result))
    escolha = int(input("Do you want to exit the program? [5 to exit]: "))
|
the-stack_106_17856
|
# import the necessary packages
from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np
class CentroidTracker:
def __init__(self, maxDisappeared=50, maxDistance=50):
# initialize the next unique object ID along with two ordered
# dictionaries used to keep track of mapping a given object
# ID to its centroid and number of consecutive frames it has
# been marked as "disappeared", respectively
self.nextObjectID = 0
self.objects = OrderedDict()
self.disappeared = OrderedDict()
# store the number of maximum consecutive frames a given
# object is allowed to be marked as "disappeared" until we
# need to deregister the object from tracking
self.maxDisappeared = maxDisappeared
# store the maximum distance between centroids to associate
# an object -- if the distance is larger than this maximum
# distance we'll start to mark the object as "disappeared"
self.maxDistance = maxDistance
def register(self, centroid):
# when registering an object we use the next available object
# ID to store the centroid
self.objects[self.nextObjectID] = centroid
self.disappeared[self.nextObjectID] = 0
self.nextObjectID += 1
def deregister(self, objectID):
# to deregister an object ID we delete the object ID from
# both of our respective dictionaries
del self.objects[objectID]
del self.disappeared[objectID]
def update(self, rects):
# check to see if the list of input bounding box rectangles
# is empty
if len(rects) == 0:
# loop over any existing tracked objects and mark them
# as disappeared
for objectID in list(self.disappeared.keys()):
self.disappeared[objectID] += 1
# if we have reached a maximum number of consecutive
# frames where a given object has been marked as
# missing, deregister it
if self.disappeared[objectID] > self.maxDisappeared:
self.deregister(objectID)
# return early as there are no centroids or tracking info
# to update
return self.objects
# initialize an array of input centroids for the current frame
inputCentroids = np.zeros((len(rects), 2), dtype="int")
# loop over the bounding box rectangles
for (i, (startX, startY, endX, endY)) in enumerate(rects):
# use the bounding box coordinates to derive the centroid
cX = int((startX + endX) / 2.0)
cY = int((startY + endY) / 2.0)
inputCentroids[i] = (cX, cY)
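            # e.g. a box (startX, startY, endX, endY) = (10, 20, 50, 60)
            # yields the centroid (30, 40)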
# if we are currently not tracking any objects take the input
# centroids and register each of them
if len(self.objects) == 0:
for i in range(0, len(inputCentroids)):
self.register(inputCentroids[i])
        # otherwise, we are currently tracking objects so we need to
# try to match the input centroids to existing object
# centroids
else:
# grab the set of object IDs and corresponding centroids
objectIDs = list(self.objects.keys())
objectCentroids = list(self.objects.values())
# compute the distance between each pair of object
# centroids and input centroids, respectively -- our
# goal will be to match an input centroid to an existing
# object centroid
D = dist.cdist(np.array(objectCentroids), inputCentroids)
# in order to perform this matching we must (1) find the
# smallest value in each row and then (2) sort the row
# indexes based on their minimum values so that the row
            # with the smallest value is at the *front* of the index
# list
rows = D.min(axis=1).argsort()
# next, we perform a similar process on the columns by
# finding the smallest value in each column and then
# sorting using the previously computed row index list
cols = D.argmin(axis=1)[rows]
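            # Tiny illustrative example with two tracked objects and two detections:
            #   D = [[ 2, 9],
            #        [10, 3]]
            # gives rows = [0, 1] and cols = [0, 1], i.e. object 0 is matched to input
            # centroid 0 and object 1 to input centroid 1.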
# in order to determine if we need to update, register,
# or deregister an object we need to keep track of which
# of the rows and column indexes we have already examined
usedRows = set()
usedCols = set()
# loop over the combination of the (row, column) index
# tuples
for (row, col) in zip(rows, cols):
# if we have already examined either the row or
# column value before, ignore it
if row in usedRows or col in usedCols:
continue
# if the distance between centroids is greater than
# the maximum distance, do not associate the two
# centroids to the same object
if D[row, col] > self.maxDistance:
continue
# otherwise, grab the object ID for the current row,
# set its new centroid, and reset the disappeared
# counter
objectID = objectIDs[row]
self.objects[objectID] = inputCentroids[col]
self.disappeared[objectID] = 0
# indicate that we have examined each of the row and
# column indexes, respectively
usedRows.add(row)
usedCols.add(col)
            # compute both the row and column indexes we have NOT yet
            # examined
unusedRows = set(range(0, D.shape[0])).difference(usedRows)
unusedCols = set(range(0, D.shape[1])).difference(usedCols)
# in the event that the number of object centroids is
# equal or greater than the number of input centroids
# we need to check and see if some of these objects have
# potentially disappeared
if D.shape[0] >= D.shape[1]:
# loop over the unused row indexes
for row in unusedRows:
# grab the object ID for the corresponding row
# index and increment the disappeared counter
objectID = objectIDs[row]
self.disappeared[objectID] += 1
# check to see if the number of consecutive
# frames the object has been marked "disappeared"
# for warrants deregistering the object
if self.disappeared[objectID] > self.maxDisappeared:
self.deregister(objectID)
# otherwise, if the number of input centroids is greater
# than the number of existing object centroids we need to
# register each new input centroid as a trackable object
else:
for col in unusedCols:
self.register(inputCentroids[col])
# return the set of trackable objects
return self.objects
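# Minimal usage sketch (hypothetical bounding boxes): the tracker hands out stable IDs
# as long as a detection's centroid stays within maxDistance of a tracked centroid.
if __name__ == "__main__":
    tracker = CentroidTracker(maxDisappeared=10, maxDistance=75)
    frame1 = [(10, 20, 50, 60), (200, 220, 240, 260)]
    frame2 = [(14, 24, 54, 64), (205, 224, 245, 264)]
    print(tracker.update(frame1))  # registers object IDs 0 and 1
    print(tracker.update(frame2))  # the same IDs follow the slightly moved boxes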
|
the-stack_106_17859
|
#!./bin/python
# ---------------------------------------------------------------------
# MRTHandler
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import logging
import asyncio
# Third-party modules
import orjson
# NOC modules
from noc.core.service.authhandler import AuthRequestHandler
from noc.core.service.error import RPCRemoteError, RPCError
from noc.core.perf import metrics
from noc.core.span import Span
from noc.sa.models.managedobject import ManagedObject
from noc.sa.models.useraccess import UserAccess
from noc.core.debug import error_report
from noc.core.error import ERR_UNKNOWN
from noc.config import config
from noc.core.comp import smart_text
logger = logging.getLogger(__name__)
class MRTRequestHandler(AuthRequestHandler):
async def write_chunk(self, obj):
data = smart_text(orjson.dumps(obj))
self.write("%s|%s" % (len(data), data))
logger.debug("%s|%s" % (len(data), data))
await self.flush()
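    # Each chunk produced by write_chunk is length-prefixed JSON, for example
    # (illustrative values): 26|{"id":"42","running":true}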
async def run_script(self, oid, script, args, span_id=0, bi_id=None):
with Span(
server="MRT",
service="run_script",
sample=int(config.mrt.enable_command_logging),
in_label=bi_id or oid,
parent=span_id,
client=self.current_user,
) as span:
try:
await self.write_chunk({"id": str(oid), "running": True})
logger.debug("[%s] Run script %s %s %s", span.context, oid, script, args)
r = await self.service.sae.script(oid, script, args)
metrics["mrt_success"] += 1
except RPCRemoteError as e:
span.set_error_from_exc(e, getattr(e, "remote_code", 1))
return {"id": str(oid), "error": str(e)}
except RPCError as e:
logger.error("RPC Error: %s" % str(e))
span.set_error_from_exc(e, getattr(e, "code", 1))
return {"id": str(oid), "error": str(e)}
except Exception as e:
error_report()
metrics["mrt_failed"] += 1
span.set_error_from_exc(e)
return {"id": str(oid), "error": str(e)}
if script == "commands":
if r["errors"]:
span.set_error(ERR_UNKNOWN, r["output"])
return {"id": str(oid), "error": r["output"]}
span.out_label = r["output"]
return {"id": str(oid), "result": r["output"]}
else:
return {"id": str(oid), "result": r}
async def post(self, *args, **kwargs):
"""
        The request body is a list of
{
id: <managed object id>,
script: <script name>,
args: <arguments>
}
:param args:
:param kwargs:
:return:
"""
metrics["mrt_requests"] += 1
# Parse request
req = orjson.loads(self.request.body)
# Disable nginx proxy buffering
self.set_header("X-Accel-Buffering", "no")
# Object ids
ids = set(int(d["id"]) for d in req if "id" in d and "script" in d)
logger.info(
"Run task on parralels: %d (Max concurrent %d), for User: %s",
len(req),
config.mrt.max_concurrency,
self.current_user,
)
# Check access
qs = ManagedObject.objects.filter(id__in=list(ids))
if not self.current_user.is_superuser:
adm_domains = UserAccess.get_domains(self.current_user)
qs = qs.filter(administrative_domain__in=adm_domains)
ids = dict(qs.values_list("id", "bi_id"))
with Span(
sample=int(config.mrt.enable_command_logging),
server="MRT",
service="post",
client=self.current_user,
in_label=req,
) as span:
if self.service.use_telemetry:
logger.info(
"[%s] Enable telemetry for task, user: %s", span.span_id, self.current_user
)
futures = set()
for d in req:
if "id" not in d or "script" not in d:
continue
oid = int(d["id"])
if oid not in ids:
await self.write_chunk({"id": str(d["id"]), "error": "Access denied"})
metrics["mrt_access_denied"] += 1
continue
while len(futures) >= config.mrt.max_concurrency:
done, futures = await asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
for f in done:
r = await f
await self.write_chunk(r)
futures.add(
self.run_script(
oid, d["script"], d.get("args"), span_id=span.span_id, bi_id=ids.get(oid)
)
)
# Wait for rest
while futures:
done, futures = await asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
for f in done:
r = await f
await self.write_chunk(r)
logger.info("Done")
|